commit 91c6cc9b5c

    Complete the renaming from "flush" to "invalidate" across both tmem
    frontends (cleancache and frontswap) and both tmem backends (Xen and
    zcache), as required by akpm.  This change is completely cosmetic.

    [v10: no change]
    [v9: akpm@linux-foundation.org: change "flush" to "invalidate", part 3]
    Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
    Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
    Cc: Jan Beulich <JBeulich@novell.com>
    Acked-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
    Cc: Jeremy Fitzhardinge <jeremy@goop.org>
    Cc: Hugh Dickins <hughd@google.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: Nitin Gupta <ngupta@vflare.org>
    Cc: Matthew Wilcox <matthew@wil.cx>
    Cc: Chris Mason <chris.mason@oracle.com>
    Cc: Rik Riel <riel@redhat.com>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    [v11: Remove the frontswap part]
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
411 lines
9.9 KiB
C

/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/cleancache.h>

/* temporary ifdef until include/linux/frontswap.h is upstream */
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>

#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24
#define TMEM_SPEC_VERSION          1
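
/*
 * A TMEM_NEW_POOL flags word is assembled in xen_tmem_new_pool() below:
 * bit 0 marks the pool persistent, bit 1 marks it shared, the bits at
 * TMEM_POOL_PAGESIZE_SHIFT carry the page size encoded as
 * log2(pagesize) - 12, and the bits at TMEM_VERSION_SHIFT carry the
 * tmem spec version.
 */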

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* flags for tmem_ops.new_pool (same values as the NEW_POOL bits above) */
#define TMEM_POOL_PERSIST	1
#define TMEM_POOL_SHARED	2

/* xen tmem foundation ops/hypercalls */
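
/*
 * Marshal a generic tmem command into a struct tmem_op and issue the
 * tmem hypercall.  The guest frame number (gmfn) is passed through a
 * guest handle; oid selects the object and index the page within it.
 */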
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
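
/*
 * Create a new tmem pool.  The pagesize argument is reduced to its log2
 * by the loop below and encoded relative to the 4K base (pageshift - 12)
 * alongside the spec version before the NEW_POOL hypercall is issued.
 */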
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
	u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

int tmem_enabled __read_mostly;
EXPORT_SYMBOL(tmem_enabled);

static int __init enable_tmem(char *s)
{
	tmem_enabled = 1;
	return 1;
}

__setup("tmem", enable_tmem);

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */
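
/*
 * Copy a clean page cache page into tmem.  The pool id and the file key
 * come from the cleancache core; the put is silently skipped if the
 * pool is invalid or the page offset does not fit in 32 bits.
 */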
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
	pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static int use_cleancache = 1;

static int __init no_cleancache(char *s)
{
	use_cleancache = 0;
	return 1;
}

__setup("nocleancache", no_cleancache);

static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
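
/*
 * Worked example: with SWIZ_BITS == 4, a page at offset 0x12345 in
 * swaptype 1 maps to oid _oswiz(1, 0x12345) = (1 << 4) | 0x5 = 0x15
 * and index iswiz(0x12345) = 0x1234, so consecutive swap offsets are
 * spread across 16 tmem objects per swaptype.
 */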

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}
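
/*
 * Because swizzling spreads one swaptype across 2^SWIZ_BITS tmem
 * objects, invalidating a whole swap area means flushing every one of
 * those objects, which the loop below does oid by oid.
 */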
/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static int __initdata use_frontswap = 1;

static int __init no_frontswap(char *s)
{
	use_frontswap = 0;
	return 1;
}

__setup("nofrontswap", no_frontswap);

static struct frontswap_ops tmem_frontswap_ops = {
	.put_page = tmem_frontswap_put_page,
	.get_page = tmem_frontswap_get_page,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif
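
/*
 * Register the frontswap and/or cleancache ops at boot.  Both paths
 * warn if another backend had already registered ops, since only one
 * set of ops is active at a time.
 */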
static int __init xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && use_frontswap) {
		char *s = "";
		struct frontswap_ops old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (old_ops.init != NULL)
			s = " (WARNING: frontswap_ops overridden)";
		printk(KERN_INFO "frontswap enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && use_cleancache) {
		char *s = "";
		struct cleancache_ops old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops.init_fs != NULL)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
	return 0;
}

module_init(xen_tmem_init)
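
/*
 * Usage note: tmem is off by default; booting with "tmem" on the kernel
 * command line enables it, and "nocleancache" / "nofrontswap" opt out of
 * the individual frontends (see the __setup() hooks above).
 */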