mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-21 03:33:59 +08:00
7512102cf6
When moving tasks from old memcg (with move_charge_at_immigrate on new memcg), followed by removal of old memcg, hit General Protection Fault in mem_cgroup_lru_del_list() (called from release_pages called from free_pages_and_swap_cache from tlb_flush_mmu from tlb_finish_mmu from exit_mmap from mmput from exit_mm from do_exit). Somewhat reproducible, takes a few hours: the old struct mem_cgroup has been freed and poisoned by SLAB_DEBUG, but mem_cgroup_lru_del_list() is still trying to update its stats, and take page off lru before freeing. A task, or a charge, or a page on lru: each secures a memcg against removal. In this case, the last task has been moved out of the old memcg, and it is exiting: anonymous pages are uncharged one by one from the memcg, as they are zapped from its pagetables, so the charge gets down to 0; but the pages themselves are queued in an mmu_gather for freeing. Most of those pages will be on lru (and force_empty is careful to lru_add_drain_all, to add pages from pagevec to lru first), but not necessarily all: perhaps some have been isolated for page reclaim, perhaps some isolated for other reasons. So, force_empty may find no task, no charge and no page on lru, and let the removal proceed. There would still be no problem if these pages were immediately freed; but typically (and the put_page_testzero protocol demands it) they have to be added back to lru before they are found freeable, then removed from lru and freed. We don't see the issue when adding, because the mem_cgroup_iter() loops keep their own reference to the memcg being scanned; but when it comes to mem_cgroup_lru_del_list(). I believe this was not an issue in v3.2: there, PageCgroupAcctLRU and PageCgroupUsed flags were used (like a trick with mirrors) to deflect view of pc->mem_cgroup to the stable root_mem_cgroup when neither set.38c5d72f3e
("memcg: simplify LRU handling by new rule") mercifully removed those convolutions, but left this General Protection Fault. But it's surprisingly easy to restore the old behaviour: just check PageCgroupUsed in mem_cgroup_lru_add_list() (which decides on which lruvec to add), and reset pc to root_mem_cgroup if page is uncharged. A risky change? just going back to how it worked before; testing, and an audit of uses of pc->mem_cgroup, show no problem. And there's a nice bonus: with mem_cgroup_lru_add_list() itself making sure that an uncharged page goes to root lru, mem_cgroup_reset_owner() no longer has any purpose, and we can safely revert4e5f01c2b9
("memcg: clear pc->mem_cgroup if necessary"). Calling update_page_reclaim_stat() after add_page_to_lru_list() in swap.c is not strictly necessary: the lru_lock there, with RCU before memcg structures are freed, makes mem_cgroup_get_reclaim_stat_from_page safe without that; but it seems cleaner to rely on one dependency less. Signed-off-by: Hugh Dickins <hughd@google.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Konstantin Khlebnikov <khlebnikov@openvz.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2031 lines
55 KiB
C
2031 lines
55 KiB
C
/*
|
|
* Memory merging support.
|
|
*
|
|
* This code enables dynamic sharing of identical pages found in different
|
|
* memory areas, even if they are not shared by fork()
|
|
*
|
|
* Copyright (C) 2008-2009 Red Hat, Inc.
|
|
* Authors:
|
|
* Izik Eidus
|
|
* Andrea Arcangeli
|
|
* Chris Wright
|
|
* Hugh Dickins
|
|
*
|
|
* This work is licensed under the terms of the GNU GPL, version 2.
|
|
*/
|
|
|
|
#include <linux/errno.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/mman.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/rwsem.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/rmap.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/jhash.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/kthread.h>
|
|
#include <linux/wait.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/rbtree.h>
|
|
#include <linux/memory.h>
|
|
#include <linux/mmu_notifier.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/ksm.h>
|
|
#include <linux/hash.h>
|
|
#include <linux/freezer.h>
|
|
#include <linux/oom.h>
|
|
|
|
#include <asm/tlbflush.h>
|
|
#include "internal.h"
|
|
|
|
/*
|
|
* A few notes about the KSM scanning process,
|
|
* to make it easier to understand the data structures below:
|
|
*
|
|
* In order to reduce excessive scanning, KSM sorts the memory pages by their
|
|
* contents into a data structure that holds pointers to the pages' locations.
|
|
*
|
|
* Since the contents of the pages may change at any moment, KSM cannot just
|
|
* insert the pages into a normal sorted tree and expect it to find anything.
|
|
* Therefore KSM uses two data structures - the stable and the unstable tree.
|
|
*
|
|
* The stable tree holds pointers to all the merged pages (ksm pages), sorted
|
|
* by their contents. Because each such page is write-protected, searching on
|
|
* this tree is fully assured to be working (except when pages are unmapped),
|
|
* and therefore this tree is called the stable tree.
|
|
*
|
|
* In addition to the stable tree, KSM uses a second data structure called the
|
|
* unstable tree: this tree holds pointers to pages which have been found to
|
|
* be "unchanged for a period of time". The unstable tree sorts these pages
|
|
* by their contents, but since they are not write-protected, KSM cannot rely
|
|
* upon the unstable tree to work correctly - the unstable tree is liable to
|
|
* be corrupted as its contents are modified, and so it is called unstable.
|
|
*
|
|
* KSM solves this problem by several techniques:
|
|
*
|
|
* 1) The unstable tree is flushed every time KSM completes scanning all
|
|
* memory areas, and then the tree is rebuilt again from the beginning.
|
|
* 2) KSM will only insert into the unstable tree, pages whose hash value
|
|
* has not changed since the previous scan of all memory areas.
|
|
* 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
|
|
* colors of the nodes and not on their contents, assuring that even when
|
|
* the tree gets "corrupted" it won't get out of balance, so scanning time
|
|
* remains the same (also, searching and inserting nodes in an rbtree uses
|
|
* the same algorithm, so we have no overhead when we flush and rebuild).
|
|
* 4) KSM never flushes the stable tree, which means that even if it were to
|
|
* take 10 attempts to find a page in the unstable tree, once it is found,
|
|
* it is secured in the stable tree. (When we scan a new page, we first
|
|
* compare it against the stable tree, and then against the unstable tree.)
|
|
*/
|
|
|
|
/**
|
|
* struct mm_slot - ksm information per mm that is being scanned
|
|
* @link: link to the mm_slots hash list
|
|
* @mm_list: link into the mm_slots list, rooted in ksm_mm_head
|
|
* @rmap_list: head for this mm_slot's singly-linked list of rmap_items
|
|
* @mm: the mm that this information is valid for
|
|
*/
|
|
struct mm_slot {
|
|
struct hlist_node link;
|
|
struct list_head mm_list;
|
|
struct rmap_item *rmap_list;
|
|
struct mm_struct *mm;
|
|
};
|
|
|
|
/**
|
|
* struct ksm_scan - cursor for scanning
|
|
* @mm_slot: the current mm_slot we are scanning
|
|
* @address: the next address inside that to be scanned
|
|
* @rmap_list: link to the next rmap to be scanned in the rmap_list
|
|
* @seqnr: count of completed full scans (needed when removing unstable node)
|
|
*
|
|
* There is only the one ksm_scan instance of this cursor structure.
|
|
*/
|
|
struct ksm_scan {
|
|
struct mm_slot *mm_slot;
|
|
unsigned long address;
|
|
struct rmap_item **rmap_list;
|
|
unsigned long seqnr;
|
|
};
|
|
|
|
/**
|
|
* struct stable_node - node of the stable rbtree
|
|
* @node: rb node of this ksm page in the stable tree
|
|
* @hlist: hlist head of rmap_items using this ksm page
|
|
* @kpfn: page frame number of this ksm page
|
|
*/
|
|
struct stable_node {
|
|
struct rb_node node;
|
|
struct hlist_head hlist;
|
|
unsigned long kpfn;
|
|
};
|
|
|
|
/**
|
|
* struct rmap_item - reverse mapping item for virtual addresses
|
|
* @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
|
|
* @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
|
|
* @mm: the memory structure this rmap_item is pointing into
|
|
* @address: the virtual address this rmap_item tracks (+ flags in low bits)
|
|
* @oldchecksum: previous checksum of the page at that virtual address
|
|
* @node: rb node of this rmap_item in the unstable tree
|
|
* @head: pointer to stable_node heading this list in the stable tree
|
|
* @hlist: link into hlist of rmap_items hanging off that stable_node
|
|
*/
|
|
struct rmap_item {
|
|
struct rmap_item *rmap_list;
|
|
struct anon_vma *anon_vma; /* when stable */
|
|
struct mm_struct *mm;
|
|
unsigned long address; /* + low bits used for flags below */
|
|
unsigned int oldchecksum; /* when unstable */
|
|
union {
|
|
struct rb_node node; /* when node of unstable tree */
|
|
struct { /* when listed from stable tree */
|
|
struct stable_node *head;
|
|
struct hlist_node hlist;
|
|
};
|
|
};
|
|
};
|
|
|
|
#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
|
|
#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
|
|
#define STABLE_FLAG 0x200 /* is listed from the stable tree */
|
|
|
|
/* The stable and unstable tree heads */
|
|
static struct rb_root root_stable_tree = RB_ROOT;
|
|
static struct rb_root root_unstable_tree = RB_ROOT;
|
|
|
|
#define MM_SLOTS_HASH_SHIFT 10
|
|
#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
|
|
static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];
|
|
|
|
static struct mm_slot ksm_mm_head = {
|
|
.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
|
|
};
|
|
static struct ksm_scan ksm_scan = {
|
|
.mm_slot = &ksm_mm_head,
|
|
};
|
|
|
|
static struct kmem_cache *rmap_item_cache;
|
|
static struct kmem_cache *stable_node_cache;
|
|
static struct kmem_cache *mm_slot_cache;
|
|
|
|
/* The number of nodes in the stable tree */
|
|
static unsigned long ksm_pages_shared;
|
|
|
|
/* The number of page slots additionally sharing those nodes */
|
|
static unsigned long ksm_pages_sharing;
|
|
|
|
/* The number of nodes in the unstable tree */
|
|
static unsigned long ksm_pages_unshared;
|
|
|
|
/* The number of rmap_items in use: to calculate pages_volatile */
|
|
static unsigned long ksm_rmap_items;
|
|
|
|
/* Number of pages ksmd should scan in one batch */
|
|
static unsigned int ksm_thread_pages_to_scan = 100;
|
|
|
|
/* Milliseconds ksmd should sleep between batches */
|
|
static unsigned int ksm_thread_sleep_millisecs = 20;
|
|
|
|
#define KSM_RUN_STOP 0
|
|
#define KSM_RUN_MERGE 1
|
|
#define KSM_RUN_UNMERGE 2
|
|
static unsigned int ksm_run = KSM_RUN_STOP;
|
|
|
|
static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
|
|
static DEFINE_MUTEX(ksm_thread_mutex);
|
|
static DEFINE_SPINLOCK(ksm_mmlist_lock);
|
|
|
|
#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
|
|
sizeof(struct __struct), __alignof__(struct __struct),\
|
|
(__flags), NULL)
|
|
|
|
static int __init ksm_slab_init(void)
|
|
{
|
|
rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
|
|
if (!rmap_item_cache)
|
|
goto out;
|
|
|
|
stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
|
|
if (!stable_node_cache)
|
|
goto out_free1;
|
|
|
|
mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
|
|
if (!mm_slot_cache)
|
|
goto out_free2;
|
|
|
|
return 0;
|
|
|
|
out_free2:
|
|
kmem_cache_destroy(stable_node_cache);
|
|
out_free1:
|
|
kmem_cache_destroy(rmap_item_cache);
|
|
out:
|
|
return -ENOMEM;
|
|
}
|
|
|
|
static void __init ksm_slab_free(void)
|
|
{
|
|
kmem_cache_destroy(mm_slot_cache);
|
|
kmem_cache_destroy(stable_node_cache);
|
|
kmem_cache_destroy(rmap_item_cache);
|
|
mm_slot_cache = NULL;
|
|
}
|
|
|
|
static inline struct rmap_item *alloc_rmap_item(void)
|
|
{
|
|
struct rmap_item *rmap_item;
|
|
|
|
rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
|
|
if (rmap_item)
|
|
ksm_rmap_items++;
|
|
return rmap_item;
|
|
}
|
|
|
|
static inline void free_rmap_item(struct rmap_item *rmap_item)
|
|
{
|
|
ksm_rmap_items--;
|
|
rmap_item->mm = NULL; /* debug safety */
|
|
kmem_cache_free(rmap_item_cache, rmap_item);
|
|
}
|
|
|
|
static inline struct stable_node *alloc_stable_node(void)
|
|
{
|
|
return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
|
|
}
|
|
|
|
static inline void free_stable_node(struct stable_node *stable_node)
|
|
{
|
|
kmem_cache_free(stable_node_cache, stable_node);
|
|
}
|
|
|
|
static inline struct mm_slot *alloc_mm_slot(void)
|
|
{
|
|
if (!mm_slot_cache) /* initialization failed */
|
|
return NULL;
|
|
return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
|
|
}
|
|
|
|
static inline void free_mm_slot(struct mm_slot *mm_slot)
|
|
{
|
|
kmem_cache_free(mm_slot_cache, mm_slot);
|
|
}
|
|
|
|
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
|
|
{
|
|
struct mm_slot *mm_slot;
|
|
struct hlist_head *bucket;
|
|
struct hlist_node *node;
|
|
|
|
bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
|
|
hlist_for_each_entry(mm_slot, node, bucket, link) {
|
|
if (mm == mm_slot->mm)
|
|
return mm_slot;
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
static void insert_to_mm_slots_hash(struct mm_struct *mm,
|
|
struct mm_slot *mm_slot)
|
|
{
|
|
struct hlist_head *bucket;
|
|
|
|
bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
|
|
mm_slot->mm = mm;
|
|
hlist_add_head(&mm_slot->link, bucket);
|
|
}
|
|
|
|
static inline int in_stable_tree(struct rmap_item *rmap_item)
|
|
{
|
|
return rmap_item->address & STABLE_FLAG;
|
|
}
|
|
|
|
/*
|
|
* ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
|
|
* page tables after it has passed through ksm_exit() - which, if necessary,
|
|
* takes mmap_sem briefly to serialize against them. ksm_exit() does not set
|
|
* a special flag: they can just back out as soon as mm_users goes to zero.
|
|
* ksm_test_exit() is used throughout to make this test for exit: in some
|
|
* places for correctness, in some places just to avoid unnecessary work.
|
|
*/
|
|
static inline bool ksm_test_exit(struct mm_struct *mm)
|
|
{
|
|
return atomic_read(&mm->mm_users) == 0;
|
|
}
|
|
|
|
/*
|
|
* We use break_ksm to break COW on a ksm page: it's a stripped down
|
|
*
|
|
* if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
|
|
* put_page(page);
|
|
*
|
|
* but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
|
|
* in case the application has unmapped and remapped mm,addr meanwhile.
|
|
* Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
|
|
* mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
|
|
*/
|
|
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
|
|
{
|
|
struct page *page;
|
|
int ret = 0;
|
|
|
|
do {
|
|
cond_resched();
|
|
page = follow_page(vma, addr, FOLL_GET);
|
|
if (IS_ERR_OR_NULL(page))
|
|
break;
|
|
if (PageKsm(page))
|
|
ret = handle_mm_fault(vma->vm_mm, vma, addr,
|
|
FAULT_FLAG_WRITE);
|
|
else
|
|
ret = VM_FAULT_WRITE;
|
|
put_page(page);
|
|
} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
|
|
/*
|
|
* We must loop because handle_mm_fault() may back out if there's
|
|
* any difficulty e.g. if pte accessed bit gets updated concurrently.
|
|
*
|
|
* VM_FAULT_WRITE is what we have been hoping for: it indicates that
|
|
* COW has been broken, even if the vma does not permit VM_WRITE;
|
|
* but note that a concurrent fault might break PageKsm for us.
|
|
*
|
|
* VM_FAULT_SIGBUS could occur if we race with truncation of the
|
|
* backing file, which also invalidates anonymous pages: that's
|
|
* okay, that truncation will have unmapped the PageKsm for us.
|
|
*
|
|
* VM_FAULT_OOM: at the time of writing (late July 2009), setting
|
|
* aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
|
|
* current task has TIF_MEMDIE set, and will be OOM killed on return
|
|
* to user; and ksmd, having no mm, would never be chosen for that.
|
|
*
|
|
* But if the mm is in a limited mem_cgroup, then the fault may fail
|
|
* with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
|
|
* even ksmd can fail in this way - though it's usually breaking ksm
|
|
* just to undo a merge it made a moment before, so unlikely to oom.
|
|
*
|
|
* That's a pity: we might therefore have more kernel pages allocated
|
|
* than we're counting as nodes in the stable tree; but ksm_do_scan
|
|
* will retry to break_cow on each pass, so should recover the page
|
|
* in due course. The important thing is to not let VM_MERGEABLE
|
|
* be cleared while any such pages might remain in the area.
|
|
*/
|
|
return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
|
|
}
|
|
|
|
static void break_cow(struct rmap_item *rmap_item)
|
|
{
|
|
struct mm_struct *mm = rmap_item->mm;
|
|
unsigned long addr = rmap_item->address;
|
|
struct vm_area_struct *vma;
|
|
|
|
/*
|
|
* It is not an accident that whenever we want to break COW
|
|
* to undo, we also need to drop a reference to the anon_vma.
|
|
*/
|
|
put_anon_vma(rmap_item->anon_vma);
|
|
|
|
down_read(&mm->mmap_sem);
|
|
if (ksm_test_exit(mm))
|
|
goto out;
|
|
vma = find_vma(mm, addr);
|
|
if (!vma || vma->vm_start > addr)
|
|
goto out;
|
|
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
|
|
goto out;
|
|
break_ksm(vma, addr);
|
|
out:
|
|
up_read(&mm->mmap_sem);
|
|
}
|
|
|
|
static struct page *page_trans_compound_anon(struct page *page)
|
|
{
|
|
if (PageTransCompound(page)) {
|
|
struct page *head = compound_trans_head(page);
|
|
/*
|
|
* head may actually be splitted and freed from under
|
|
* us but it's ok here.
|
|
*/
|
|
if (PageAnon(head))
|
|
return head;
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
static struct page *get_mergeable_page(struct rmap_item *rmap_item)
|
|
{
|
|
struct mm_struct *mm = rmap_item->mm;
|
|
unsigned long addr = rmap_item->address;
|
|
struct vm_area_struct *vma;
|
|
struct page *page;
|
|
|
|
down_read(&mm->mmap_sem);
|
|
if (ksm_test_exit(mm))
|
|
goto out;
|
|
vma = find_vma(mm, addr);
|
|
if (!vma || vma->vm_start > addr)
|
|
goto out;
|
|
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
|
|
goto out;
|
|
|
|
page = follow_page(vma, addr, FOLL_GET);
|
|
if (IS_ERR_OR_NULL(page))
|
|
goto out;
|
|
if (PageAnon(page) || page_trans_compound_anon(page)) {
|
|
flush_anon_page(vma, page, addr);
|
|
flush_dcache_page(page);
|
|
} else {
|
|
put_page(page);
|
|
out: page = NULL;
|
|
}
|
|
up_read(&mm->mmap_sem);
|
|
return page;
|
|
}
|
|
|
|
static void remove_node_from_stable_tree(struct stable_node *stable_node)
|
|
{
|
|
struct rmap_item *rmap_item;
|
|
struct hlist_node *hlist;
|
|
|
|
hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
|
|
if (rmap_item->hlist.next)
|
|
ksm_pages_sharing--;
|
|
else
|
|
ksm_pages_shared--;
|
|
put_anon_vma(rmap_item->anon_vma);
|
|
rmap_item->address &= PAGE_MASK;
|
|
cond_resched();
|
|
}
|
|
|
|
rb_erase(&stable_node->node, &root_stable_tree);
|
|
free_stable_node(stable_node);
|
|
}
|
|
|
|
/*
|
|
* get_ksm_page: checks if the page indicated by the stable node
|
|
* is still its ksm page, despite having held no reference to it.
|
|
* In which case we can trust the content of the page, and it
|
|
* returns the gotten page; but if the page has now been zapped,
|
|
* remove the stale node from the stable tree and return NULL.
|
|
*
|
|
* You would expect the stable_node to hold a reference to the ksm page.
|
|
* But if it increments the page's count, swapping out has to wait for
|
|
* ksmd to come around again before it can free the page, which may take
|
|
* seconds or even minutes: much too unresponsive. So instead we use a
|
|
* "keyhole reference": access to the ksm page from the stable node peeps
|
|
* out through its keyhole to see if that page still holds the right key,
|
|
* pointing back to this stable node. This relies on freeing a PageAnon
|
|
* page to reset its page->mapping to NULL, and relies on no other use of
|
|
* a page to put something that might look like our key in page->mapping.
|
|
*
|
|
* include/linux/pagemap.h page_cache_get_speculative() is a good reference,
|
|
* but this is different - made simpler by ksm_thread_mutex being held, but
|
|
* interesting for assuming that no other use of the struct page could ever
|
|
* put our expected_mapping into page->mapping (or a field of the union which
|
|
* coincides with page->mapping). The RCU calls are not for KSM at all, but
|
|
* to keep the page_count protocol described with page_cache_get_speculative.
|
|
*
|
|
* Note: it is possible that get_ksm_page() will return NULL one moment,
|
|
* then page the next, if the page is in between page_freeze_refs() and
|
|
* page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
|
|
* is on its way to being freed; but it is an anomaly to bear in mind.
|
|
*/
|
|
static struct page *get_ksm_page(struct stable_node *stable_node)
|
|
{
|
|
struct page *page;
|
|
void *expected_mapping;
|
|
|
|
page = pfn_to_page(stable_node->kpfn);
|
|
expected_mapping = (void *)stable_node +
|
|
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
|
|
rcu_read_lock();
|
|
if (page->mapping != expected_mapping)
|
|
goto stale;
|
|
if (!get_page_unless_zero(page))
|
|
goto stale;
|
|
if (page->mapping != expected_mapping) {
|
|
put_page(page);
|
|
goto stale;
|
|
}
|
|
rcu_read_unlock();
|
|
return page;
|
|
stale:
|
|
rcu_read_unlock();
|
|
remove_node_from_stable_tree(stable_node);
|
|
return NULL;
|
|
}
|
|
|
|
/*
|
|
* Removing rmap_item from stable or unstable tree.
|
|
* This function will clean the information from the stable/unstable tree.
|
|
*/
|
|
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
|
|
{
|
|
if (rmap_item->address & STABLE_FLAG) {
|
|
struct stable_node *stable_node;
|
|
struct page *page;
|
|
|
|
stable_node = rmap_item->head;
|
|
page = get_ksm_page(stable_node);
|
|
if (!page)
|
|
goto out;
|
|
|
|
lock_page(page);
|
|
hlist_del(&rmap_item->hlist);
|
|
unlock_page(page);
|
|
put_page(page);
|
|
|
|
if (stable_node->hlist.first)
|
|
ksm_pages_sharing--;
|
|
else
|
|
ksm_pages_shared--;
|
|
|
|
put_anon_vma(rmap_item->anon_vma);
|
|
rmap_item->address &= PAGE_MASK;
|
|
|
|
} else if (rmap_item->address & UNSTABLE_FLAG) {
|
|
unsigned char age;
|
|
/*
|
|
* Usually ksmd can and must skip the rb_erase, because
|
|
* root_unstable_tree was already reset to RB_ROOT.
|
|
* But be careful when an mm is exiting: do the rb_erase
|
|
* if this rmap_item was inserted by this scan, rather
|
|
* than left over from before.
|
|
*/
|
|
age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
|
|
BUG_ON(age > 1);
|
|
if (!age)
|
|
rb_erase(&rmap_item->node, &root_unstable_tree);
|
|
|
|
ksm_pages_unshared--;
|
|
rmap_item->address &= PAGE_MASK;
|
|
}
|
|
out:
|
|
cond_resched(); /* we're called from many long loops */
|
|
}
|
|
|
|
static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
|
|
struct rmap_item **rmap_list)
|
|
{
|
|
while (*rmap_list) {
|
|
struct rmap_item *rmap_item = *rmap_list;
|
|
*rmap_list = rmap_item->rmap_list;
|
|
remove_rmap_item_from_tree(rmap_item);
|
|
free_rmap_item(rmap_item);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
|
|
* than check every pte of a given vma, the locking doesn't quite work for
|
|
* that - an rmap_item is assigned to the stable tree after inserting ksm
|
|
* page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
|
|
* rmap_items from parent to child at fork time (so as not to waste time
|
|
* if exit comes before the next scan reaches it).
|
|
*
|
|
* Similarly, although we'd like to remove rmap_items (so updating counts
|
|
* and freeing memory) when unmerging an area, it's easier to leave that
|
|
* to the next pass of ksmd - consider, for example, how ksmd might be
|
|
* in cmp_and_merge_page on one of the rmap_items we would be removing.
|
|
*/
|
|
static int unmerge_ksm_pages(struct vm_area_struct *vma,
|
|
unsigned long start, unsigned long end)
|
|
{
|
|
unsigned long addr;
|
|
int err = 0;
|
|
|
|
for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
|
|
if (ksm_test_exit(vma->vm_mm))
|
|
break;
|
|
if (signal_pending(current))
|
|
err = -ERESTARTSYS;
|
|
else
|
|
err = break_ksm(vma, addr);
|
|
}
|
|
return err;
|
|
}
|
|
|
|
#ifdef CONFIG_SYSFS
|
|
/*
|
|
* Only called through the sysfs control interface:
|
|
*/
|
|
static int unmerge_and_remove_all_rmap_items(void)
|
|
{
|
|
struct mm_slot *mm_slot;
|
|
struct mm_struct *mm;
|
|
struct vm_area_struct *vma;
|
|
int err = 0;
|
|
|
|
spin_lock(&ksm_mmlist_lock);
|
|
ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
|
|
struct mm_slot, mm_list);
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
|
|
for (mm_slot = ksm_scan.mm_slot;
|
|
mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
|
|
mm = mm_slot->mm;
|
|
down_read(&mm->mmap_sem);
|
|
for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
|
if (ksm_test_exit(mm))
|
|
break;
|
|
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
|
|
continue;
|
|
err = unmerge_ksm_pages(vma,
|
|
vma->vm_start, vma->vm_end);
|
|
if (err)
|
|
goto error;
|
|
}
|
|
|
|
remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
|
|
|
|
spin_lock(&ksm_mmlist_lock);
|
|
ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
|
|
struct mm_slot, mm_list);
|
|
if (ksm_test_exit(mm)) {
|
|
hlist_del(&mm_slot->link);
|
|
list_del(&mm_slot->mm_list);
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
|
|
free_mm_slot(mm_slot);
|
|
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
|
|
up_read(&mm->mmap_sem);
|
|
mmdrop(mm);
|
|
} else {
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
up_read(&mm->mmap_sem);
|
|
}
|
|
}
|
|
|
|
ksm_scan.seqnr = 0;
|
|
return 0;
|
|
|
|
error:
|
|
up_read(&mm->mmap_sem);
|
|
spin_lock(&ksm_mmlist_lock);
|
|
ksm_scan.mm_slot = &ksm_mm_head;
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
return err;
|
|
}
|
|
#endif /* CONFIG_SYSFS */
|
|
|
|
static u32 calc_checksum(struct page *page)
|
|
{
|
|
u32 checksum;
|
|
void *addr = kmap_atomic(page, KM_USER0);
|
|
checksum = jhash2(addr, PAGE_SIZE / 4, 17);
|
|
kunmap_atomic(addr, KM_USER0);
|
|
return checksum;
|
|
}
|
|
|
|
static int memcmp_pages(struct page *page1, struct page *page2)
|
|
{
|
|
char *addr1, *addr2;
|
|
int ret;
|
|
|
|
addr1 = kmap_atomic(page1, KM_USER0);
|
|
addr2 = kmap_atomic(page2, KM_USER1);
|
|
ret = memcmp(addr1, addr2, PAGE_SIZE);
|
|
kunmap_atomic(addr2, KM_USER1);
|
|
kunmap_atomic(addr1, KM_USER0);
|
|
return ret;
|
|
}
|
|
|
|
static inline int pages_identical(struct page *page1, struct page *page2)
|
|
{
|
|
return !memcmp_pages(page1, page2);
|
|
}
|
|
|
|
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
|
|
pte_t *orig_pte)
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
unsigned long addr;
|
|
pte_t *ptep;
|
|
spinlock_t *ptl;
|
|
int swapped;
|
|
int err = -EFAULT;
|
|
|
|
addr = page_address_in_vma(page, vma);
|
|
if (addr == -EFAULT)
|
|
goto out;
|
|
|
|
BUG_ON(PageTransCompound(page));
|
|
ptep = page_check_address(page, mm, addr, &ptl, 0);
|
|
if (!ptep)
|
|
goto out;
|
|
|
|
if (pte_write(*ptep) || pte_dirty(*ptep)) {
|
|
pte_t entry;
|
|
|
|
swapped = PageSwapCache(page);
|
|
flush_cache_page(vma, addr, page_to_pfn(page));
|
|
/*
|
|
* Ok this is tricky, when get_user_pages_fast() run it doesn't
|
|
* take any lock, therefore the check that we are going to make
|
|
* with the pagecount against the mapcount is racey and
|
|
* O_DIRECT can happen right after the check.
|
|
* So we clear the pte and flush the tlb before the check
|
|
* this assure us that no O_DIRECT can happen after the check
|
|
* or in the middle of the check.
|
|
*/
|
|
entry = ptep_clear_flush(vma, addr, ptep);
|
|
/*
|
|
* Check that no O_DIRECT or similar I/O is in progress on the
|
|
* page
|
|
*/
|
|
if (page_mapcount(page) + 1 + swapped != page_count(page)) {
|
|
set_pte_at(mm, addr, ptep, entry);
|
|
goto out_unlock;
|
|
}
|
|
if (pte_dirty(entry))
|
|
set_page_dirty(page);
|
|
entry = pte_mkclean(pte_wrprotect(entry));
|
|
set_pte_at_notify(mm, addr, ptep, entry);
|
|
}
|
|
*orig_pte = *ptep;
|
|
err = 0;
|
|
|
|
out_unlock:
|
|
pte_unmap_unlock(ptep, ptl);
|
|
out:
|
|
return err;
|
|
}
|
|
|
|
/**
|
|
* replace_page - replace page in vma by new ksm page
|
|
* @vma: vma that holds the pte pointing to page
|
|
* @page: the page we are replacing by kpage
|
|
* @kpage: the ksm page we replace page by
|
|
* @orig_pte: the original value of the pte
|
|
*
|
|
* Returns 0 on success, -EFAULT on failure.
|
|
*/
|
|
static int replace_page(struct vm_area_struct *vma, struct page *page,
|
|
struct page *kpage, pte_t orig_pte)
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
pgd_t *pgd;
|
|
pud_t *pud;
|
|
pmd_t *pmd;
|
|
pte_t *ptep;
|
|
spinlock_t *ptl;
|
|
unsigned long addr;
|
|
int err = -EFAULT;
|
|
|
|
addr = page_address_in_vma(page, vma);
|
|
if (addr == -EFAULT)
|
|
goto out;
|
|
|
|
pgd = pgd_offset(mm, addr);
|
|
if (!pgd_present(*pgd))
|
|
goto out;
|
|
|
|
pud = pud_offset(pgd, addr);
|
|
if (!pud_present(*pud))
|
|
goto out;
|
|
|
|
pmd = pmd_offset(pud, addr);
|
|
BUG_ON(pmd_trans_huge(*pmd));
|
|
if (!pmd_present(*pmd))
|
|
goto out;
|
|
|
|
ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
|
|
if (!pte_same(*ptep, orig_pte)) {
|
|
pte_unmap_unlock(ptep, ptl);
|
|
goto out;
|
|
}
|
|
|
|
get_page(kpage);
|
|
page_add_anon_rmap(kpage, vma, addr);
|
|
|
|
flush_cache_page(vma, addr, pte_pfn(*ptep));
|
|
ptep_clear_flush(vma, addr, ptep);
|
|
set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
|
|
|
|
page_remove_rmap(page);
|
|
if (!page_mapped(page))
|
|
try_to_free_swap(page);
|
|
put_page(page);
|
|
|
|
pte_unmap_unlock(ptep, ptl);
|
|
err = 0;
|
|
out:
|
|
return err;
|
|
}
|
|
|
|
static int page_trans_compound_anon_split(struct page *page)
|
|
{
|
|
int ret = 0;
|
|
struct page *transhuge_head = page_trans_compound_anon(page);
|
|
if (transhuge_head) {
|
|
/* Get the reference on the head to split it. */
|
|
if (get_page_unless_zero(transhuge_head)) {
|
|
/*
|
|
* Recheck we got the reference while the head
|
|
* was still anonymous.
|
|
*/
|
|
if (PageAnon(transhuge_head))
|
|
ret = split_huge_page(transhuge_head);
|
|
else
|
|
/*
|
|
* Retry later if split_huge_page run
|
|
* from under us.
|
|
*/
|
|
ret = 1;
|
|
put_page(transhuge_head);
|
|
} else
|
|
/* Retry later if split_huge_page run from under us. */
|
|
ret = 1;
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
/*
|
|
* try_to_merge_one_page - take two pages and merge them into one
|
|
* @vma: the vma that holds the pte pointing to page
|
|
* @page: the PageAnon page that we want to replace with kpage
|
|
* @kpage: the PageKsm page that we want to map instead of page,
|
|
* or NULL the first time when we want to use page as kpage.
|
|
*
|
|
* This function returns 0 if the pages were merged, -EFAULT otherwise.
|
|
*/
|
|
static int try_to_merge_one_page(struct vm_area_struct *vma,
|
|
struct page *page, struct page *kpage)
|
|
{
|
|
pte_t orig_pte = __pte(0);
|
|
int err = -EFAULT;
|
|
|
|
if (page == kpage) /* ksm page forked */
|
|
return 0;
|
|
|
|
if (!(vma->vm_flags & VM_MERGEABLE))
|
|
goto out;
|
|
if (PageTransCompound(page) && page_trans_compound_anon_split(page))
|
|
goto out;
|
|
BUG_ON(PageTransCompound(page));
|
|
if (!PageAnon(page))
|
|
goto out;
|
|
|
|
/*
|
|
* We need the page lock to read a stable PageSwapCache in
|
|
* write_protect_page(). We use trylock_page() instead of
|
|
* lock_page() because we don't want to wait here - we
|
|
* prefer to continue scanning and merging different pages,
|
|
* then come back to this page when it is unlocked.
|
|
*/
|
|
if (!trylock_page(page))
|
|
goto out;
|
|
/*
|
|
* If this anonymous page is mapped only here, its pte may need
|
|
* to be write-protected. If it's mapped elsewhere, all of its
|
|
* ptes are necessarily already write-protected. But in either
|
|
* case, we need to lock and check page_count is not raised.
|
|
*/
|
|
if (write_protect_page(vma, page, &orig_pte) == 0) {
|
|
if (!kpage) {
|
|
/*
|
|
* While we hold page lock, upgrade page from
|
|
* PageAnon+anon_vma to PageKsm+NULL stable_node:
|
|
* stable_tree_insert() will update stable_node.
|
|
*/
|
|
set_page_stable_node(page, NULL);
|
|
mark_page_accessed(page);
|
|
err = 0;
|
|
} else if (pages_identical(page, kpage))
|
|
err = replace_page(vma, page, kpage, orig_pte);
|
|
}
|
|
|
|
if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
|
|
munlock_vma_page(page);
|
|
if (!PageMlocked(kpage)) {
|
|
unlock_page(page);
|
|
lock_page(kpage);
|
|
mlock_vma_page(kpage);
|
|
page = kpage; /* for final unlock */
|
|
}
|
|
}
|
|
|
|
unlock_page(page);
|
|
out:
|
|
return err;
|
|
}
|
|
|
|
/*
|
|
* try_to_merge_with_ksm_page - like try_to_merge_two_pages,
|
|
* but no new kernel page is allocated: kpage must already be a ksm page.
|
|
*
|
|
* This function returns 0 if the pages were merged, -EFAULT otherwise.
|
|
*/
|
|
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
|
|
struct page *page, struct page *kpage)
|
|
{
|
|
struct mm_struct *mm = rmap_item->mm;
|
|
struct vm_area_struct *vma;
|
|
int err = -EFAULT;
|
|
|
|
down_read(&mm->mmap_sem);
|
|
if (ksm_test_exit(mm))
|
|
goto out;
|
|
vma = find_vma(mm, rmap_item->address);
|
|
if (!vma || vma->vm_start > rmap_item->address)
|
|
goto out;
|
|
|
|
err = try_to_merge_one_page(vma, page, kpage);
|
|
if (err)
|
|
goto out;
|
|
|
|
/* Must get reference to anon_vma while still holding mmap_sem */
|
|
rmap_item->anon_vma = vma->anon_vma;
|
|
get_anon_vma(vma->anon_vma);
|
|
out:
|
|
up_read(&mm->mmap_sem);
|
|
return err;
|
|
}
|
|
|
|
/*
|
|
* try_to_merge_two_pages - take two identical pages and prepare them
|
|
* to be merged into one page.
|
|
*
|
|
* This function returns the kpage if we successfully merged two identical
|
|
* pages into one ksm page, NULL otherwise.
|
|
*
|
|
* Note that this function upgrades page to ksm page: if one of the pages
|
|
* is already a ksm page, try_to_merge_with_ksm_page should be used.
|
|
*/
|
|
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
|
|
struct page *page,
|
|
struct rmap_item *tree_rmap_item,
|
|
struct page *tree_page)
|
|
{
|
|
int err;
|
|
|
|
err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
|
|
if (!err) {
|
|
err = try_to_merge_with_ksm_page(tree_rmap_item,
|
|
tree_page, page);
|
|
/*
|
|
* If that fails, we have a ksm page with only one pte
|
|
* pointing to it: so break it.
|
|
*/
|
|
if (err)
|
|
break_cow(rmap_item);
|
|
}
|
|
return err ? NULL : page;
|
|
}
|
|
|
|
/*
|
|
* stable_tree_search - search for page inside the stable tree
|
|
*
|
|
* This function checks if there is a page inside the stable tree
|
|
* with identical content to the page that we are scanning right now.
|
|
*
|
|
* This function returns the stable tree node of identical content if found,
|
|
* NULL otherwise.
|
|
*/
|
|
static struct page *stable_tree_search(struct page *page)
|
|
{
|
|
struct rb_node *node = root_stable_tree.rb_node;
|
|
struct stable_node *stable_node;
|
|
|
|
stable_node = page_stable_node(page);
|
|
if (stable_node) { /* ksm page forked */
|
|
get_page(page);
|
|
return page;
|
|
}
|
|
|
|
while (node) {
|
|
struct page *tree_page;
|
|
int ret;
|
|
|
|
cond_resched();
|
|
stable_node = rb_entry(node, struct stable_node, node);
|
|
tree_page = get_ksm_page(stable_node);
|
|
if (!tree_page)
|
|
return NULL;
|
|
|
|
ret = memcmp_pages(page, tree_page);
|
|
|
|
if (ret < 0) {
|
|
put_page(tree_page);
|
|
node = node->rb_left;
|
|
} else if (ret > 0) {
|
|
put_page(tree_page);
|
|
node = node->rb_right;
|
|
} else
|
|
return tree_page;
|
|
}
|
|
|
|
return NULL;
|
|
}
|
|
|
|
/*
|
|
* stable_tree_insert - insert rmap_item pointing to new ksm page
|
|
* into the stable tree.
|
|
*
|
|
* This function returns the stable tree node just allocated on success,
|
|
* NULL otherwise.
|
|
*/
|
|
static struct stable_node *stable_tree_insert(struct page *kpage)
|
|
{
|
|
struct rb_node **new = &root_stable_tree.rb_node;
|
|
struct rb_node *parent = NULL;
|
|
struct stable_node *stable_node;
|
|
|
|
while (*new) {
|
|
struct page *tree_page;
|
|
int ret;
|
|
|
|
cond_resched();
|
|
stable_node = rb_entry(*new, struct stable_node, node);
|
|
tree_page = get_ksm_page(stable_node);
|
|
if (!tree_page)
|
|
return NULL;
|
|
|
|
ret = memcmp_pages(kpage, tree_page);
|
|
put_page(tree_page);
|
|
|
|
parent = *new;
|
|
if (ret < 0)
|
|
new = &parent->rb_left;
|
|
else if (ret > 0)
|
|
new = &parent->rb_right;
|
|
else {
|
|
/*
|
|
* It is not a bug that stable_tree_search() didn't
|
|
* find this node: because at that time our page was
|
|
* not yet write-protected, so may have changed since.
|
|
*/
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
stable_node = alloc_stable_node();
|
|
if (!stable_node)
|
|
return NULL;
|
|
|
|
rb_link_node(&stable_node->node, parent, new);
|
|
rb_insert_color(&stable_node->node, &root_stable_tree);
|
|
|
|
INIT_HLIST_HEAD(&stable_node->hlist);
|
|
|
|
stable_node->kpfn = page_to_pfn(kpage);
|
|
set_page_stable_node(kpage, stable_node);
|
|
|
|
return stable_node;
|
|
}
|
|
|
|
/*
|
|
* unstable_tree_search_insert - search for identical page,
|
|
* else insert rmap_item into the unstable tree.
|
|
*
|
|
* This function searches for a page in the unstable tree identical to the
|
|
* page currently being scanned; and if no identical page is found in the
|
|
* tree, we insert rmap_item as a new object into the unstable tree.
|
|
*
|
|
* This function returns pointer to rmap_item found to be identical
|
|
* to the currently scanned page, NULL otherwise.
|
|
*
|
|
* This function does both searching and inserting, because they share
|
|
* the same walking algorithm in an rbtree.
|
|
*/
|
|
static
|
|
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
|
|
struct page *page,
|
|
struct page **tree_pagep)
|
|
|
|
{
|
|
struct rb_node **new = &root_unstable_tree.rb_node;
|
|
struct rb_node *parent = NULL;
|
|
|
|
while (*new) {
|
|
struct rmap_item *tree_rmap_item;
|
|
struct page *tree_page;
|
|
int ret;
|
|
|
|
cond_resched();
|
|
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
|
|
tree_page = get_mergeable_page(tree_rmap_item);
|
|
if (IS_ERR_OR_NULL(tree_page))
|
|
return NULL;
|
|
|
|
/*
|
|
* Don't substitute a ksm page for a forked page.
|
|
*/
|
|
if (page == tree_page) {
|
|
put_page(tree_page);
|
|
return NULL;
|
|
}
|
|
|
|
ret = memcmp_pages(page, tree_page);
|
|
|
|
parent = *new;
|
|
if (ret < 0) {
|
|
put_page(tree_page);
|
|
new = &parent->rb_left;
|
|
} else if (ret > 0) {
|
|
put_page(tree_page);
|
|
new = &parent->rb_right;
|
|
} else {
|
|
*tree_pagep = tree_page;
|
|
return tree_rmap_item;
|
|
}
|
|
}
|
|
|
|
rmap_item->address |= UNSTABLE_FLAG;
|
|
rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
|
|
rb_link_node(&rmap_item->node, parent, new);
|
|
rb_insert_color(&rmap_item->node, &root_unstable_tree);
|
|
|
|
ksm_pages_unshared++;
|
|
return NULL;
|
|
}
|
|
|
|
/*
|
|
* stable_tree_append - add another rmap_item to the linked list of
|
|
* rmap_items hanging off a given node of the stable tree, all sharing
|
|
* the same ksm page.
|
|
*/
|
|
static void stable_tree_append(struct rmap_item *rmap_item,
|
|
struct stable_node *stable_node)
|
|
{
|
|
rmap_item->head = stable_node;
|
|
rmap_item->address |= STABLE_FLAG;
|
|
hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
|
|
|
|
if (rmap_item->hlist.next)
|
|
ksm_pages_sharing++;
|
|
else
|
|
ksm_pages_shared++;
|
|
}
|
|
|
|
/*
|
|
* cmp_and_merge_page - first see if page can be merged into the stable tree;
|
|
* if not, compare checksum to previous and if it's the same, see if page can
|
|
* be inserted into the unstable tree, or merged with a page already there and
|
|
* both transferred to the stable tree.
|
|
*
|
|
* @page: the page that we are searching identical page to.
|
|
* @rmap_item: the reverse mapping into the virtual address of this page
|
|
*/
|
|
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
|
|
{
|
|
struct rmap_item *tree_rmap_item;
|
|
struct page *tree_page = NULL;
|
|
struct stable_node *stable_node;
|
|
struct page *kpage;
|
|
unsigned int checksum;
|
|
int err;
|
|
|
|
remove_rmap_item_from_tree(rmap_item);
|
|
|
|
/* We first start with searching the page inside the stable tree */
|
|
kpage = stable_tree_search(page);
|
|
if (kpage) {
|
|
err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
|
|
if (!err) {
|
|
/*
|
|
* The page was successfully merged:
|
|
* add its rmap_item to the stable tree.
|
|
*/
|
|
lock_page(kpage);
|
|
stable_tree_append(rmap_item, page_stable_node(kpage));
|
|
unlock_page(kpage);
|
|
}
|
|
put_page(kpage);
|
|
return;
|
|
}
|
|
|
|
/*
|
|
* If the hash value of the page has changed from the last time
|
|
* we calculated it, this page is changing frequently: therefore we
|
|
* don't want to insert it in the unstable tree, and we don't want
|
|
* to waste our time searching for something identical to it there.
|
|
*/
|
|
checksum = calc_checksum(page);
|
|
if (rmap_item->oldchecksum != checksum) {
|
|
rmap_item->oldchecksum = checksum;
|
|
return;
|
|
}
|
|
|
|
tree_rmap_item =
|
|
unstable_tree_search_insert(rmap_item, page, &tree_page);
|
|
if (tree_rmap_item) {
|
|
kpage = try_to_merge_two_pages(rmap_item, page,
|
|
tree_rmap_item, tree_page);
|
|
put_page(tree_page);
|
|
/*
|
|
* As soon as we merge this page, we want to remove the
|
|
* rmap_item of the page we have merged with from the unstable
|
|
* tree, and insert it instead as new node in the stable tree.
|
|
*/
|
|
if (kpage) {
|
|
remove_rmap_item_from_tree(tree_rmap_item);
|
|
|
|
lock_page(kpage);
|
|
stable_node = stable_tree_insert(kpage);
|
|
if (stable_node) {
|
|
stable_tree_append(tree_rmap_item, stable_node);
|
|
stable_tree_append(rmap_item, stable_node);
|
|
}
|
|
unlock_page(kpage);
|
|
|
|
/*
|
|
* If we fail to insert the page into the stable tree,
|
|
* we will have 2 virtual addresses that are pointing
|
|
* to a ksm page left outside the stable tree,
|
|
* in which case we need to break_cow on both.
|
|
*/
|
|
if (!stable_node) {
|
|
break_cow(tree_rmap_item);
|
|
break_cow(rmap_item);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
|
|
struct rmap_item **rmap_list,
|
|
unsigned long addr)
|
|
{
|
|
struct rmap_item *rmap_item;
|
|
|
|
while (*rmap_list) {
|
|
rmap_item = *rmap_list;
|
|
if ((rmap_item->address & PAGE_MASK) == addr)
|
|
return rmap_item;
|
|
if (rmap_item->address > addr)
|
|
break;
|
|
*rmap_list = rmap_item->rmap_list;
|
|
remove_rmap_item_from_tree(rmap_item);
|
|
free_rmap_item(rmap_item);
|
|
}
|
|
|
|
rmap_item = alloc_rmap_item();
|
|
if (rmap_item) {
|
|
/* It has already been zeroed */
|
|
rmap_item->mm = mm_slot->mm;
|
|
rmap_item->address = addr;
|
|
rmap_item->rmap_list = *rmap_list;
|
|
*rmap_list = rmap_item;
|
|
}
|
|
return rmap_item;
|
|
}
|
|
|
|
static struct rmap_item *scan_get_next_rmap_item(struct page **page)
|
|
{
|
|
struct mm_struct *mm;
|
|
struct mm_slot *slot;
|
|
struct vm_area_struct *vma;
|
|
struct rmap_item *rmap_item;
|
|
|
|
if (list_empty(&ksm_mm_head.mm_list))
|
|
return NULL;
|
|
|
|
slot = ksm_scan.mm_slot;
|
|
if (slot == &ksm_mm_head) {
|
|
/*
|
|
* A number of pages can hang around indefinitely on per-cpu
|
|
* pagevecs, raised page count preventing write_protect_page
|
|
* from merging them. Though it doesn't really matter much,
|
|
* it is puzzling to see some stuck in pages_volatile until
|
|
* other activity jostles them out, and they also prevented
|
|
* LTP's KSM test from succeeding deterministically; so drain
|
|
* them here (here rather than on entry to ksm_do_scan(),
|
|
* so we don't IPI too often when pages_to_scan is set low).
|
|
*/
|
|
lru_add_drain_all();
|
|
|
|
root_unstable_tree = RB_ROOT;
|
|
|
|
spin_lock(&ksm_mmlist_lock);
|
|
slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
|
|
ksm_scan.mm_slot = slot;
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
/*
|
|
* Although we tested list_empty() above, a racing __ksm_exit
|
|
* of the last mm on the list may have removed it since then.
|
|
*/
|
|
if (slot == &ksm_mm_head)
|
|
return NULL;
|
|
next_mm:
|
|
ksm_scan.address = 0;
|
|
ksm_scan.rmap_list = &slot->rmap_list;
|
|
}
|
|
|
|
mm = slot->mm;
|
|
down_read(&mm->mmap_sem);
|
|
if (ksm_test_exit(mm))
|
|
vma = NULL;
|
|
else
|
|
vma = find_vma(mm, ksm_scan.address);
|
|
|
|
for (; vma; vma = vma->vm_next) {
|
|
if (!(vma->vm_flags & VM_MERGEABLE))
|
|
continue;
|
|
if (ksm_scan.address < vma->vm_start)
|
|
ksm_scan.address = vma->vm_start;
|
|
if (!vma->anon_vma)
|
|
ksm_scan.address = vma->vm_end;
|
|
|
|
while (ksm_scan.address < vma->vm_end) {
|
|
if (ksm_test_exit(mm))
|
|
break;
|
|
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
|
|
if (IS_ERR_OR_NULL(*page)) {
|
|
ksm_scan.address += PAGE_SIZE;
|
|
cond_resched();
|
|
continue;
|
|
}
|
|
if (PageAnon(*page) ||
|
|
page_trans_compound_anon(*page)) {
|
|
flush_anon_page(vma, *page, ksm_scan.address);
|
|
flush_dcache_page(*page);
|
|
rmap_item = get_next_rmap_item(slot,
|
|
ksm_scan.rmap_list, ksm_scan.address);
|
|
if (rmap_item) {
|
|
ksm_scan.rmap_list =
|
|
&rmap_item->rmap_list;
|
|
ksm_scan.address += PAGE_SIZE;
|
|
} else
|
|
put_page(*page);
|
|
up_read(&mm->mmap_sem);
|
|
return rmap_item;
|
|
}
|
|
put_page(*page);
|
|
ksm_scan.address += PAGE_SIZE;
|
|
cond_resched();
|
|
}
|
|
}
|
|
|
|
if (ksm_test_exit(mm)) {
|
|
ksm_scan.address = 0;
|
|
ksm_scan.rmap_list = &slot->rmap_list;
|
|
}
|
|
/*
|
|
* Nuke all the rmap_items that are above this current rmap:
|
|
* because there were no VM_MERGEABLE vmas with such addresses.
|
|
*/
|
|
remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
|
|
|
|
spin_lock(&ksm_mmlist_lock);
|
|
ksm_scan.mm_slot = list_entry(slot->mm_list.next,
|
|
struct mm_slot, mm_list);
|
|
if (ksm_scan.address == 0) {
|
|
/*
|
|
* We've completed a full scan of all vmas, holding mmap_sem
|
|
* throughout, and found no VM_MERGEABLE: so do the same as
|
|
* __ksm_exit does to remove this mm from all our lists now.
|
|
* This applies either when cleaning up after __ksm_exit
|
|
* (but beware: we can reach here even before __ksm_exit),
|
|
* or when all VM_MERGEABLE areas have been unmapped (and
|
|
* mmap_sem then protects against race with MADV_MERGEABLE).
|
|
*/
|
|
hlist_del(&slot->link);
|
|
list_del(&slot->mm_list);
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
|
|
free_mm_slot(slot);
|
|
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
|
|
up_read(&mm->mmap_sem);
|
|
mmdrop(mm);
|
|
} else {
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
up_read(&mm->mmap_sem);
|
|
}
|
|
|
|
/* Repeat until we've completed scanning the whole list */
|
|
slot = ksm_scan.mm_slot;
|
|
if (slot != &ksm_mm_head)
|
|
goto next_mm;
|
|
|
|
ksm_scan.seqnr++;
|
|
return NULL;
|
|
}
|
|
|
|
/**
|
|
* ksm_do_scan - the ksm scanner main worker function.
|
|
* @scan_npages - number of pages we want to scan before we return.
|
|
*/
|
|
static void ksm_do_scan(unsigned int scan_npages)
|
|
{
|
|
struct rmap_item *rmap_item;
|
|
struct page *uninitialized_var(page);
|
|
|
|
while (scan_npages-- && likely(!freezing(current))) {
|
|
cond_resched();
|
|
rmap_item = scan_get_next_rmap_item(&page);
|
|
if (!rmap_item)
|
|
return;
|
|
if (!PageKsm(page) || !in_stable_tree(rmap_item))
|
|
cmp_and_merge_page(page, rmap_item);
|
|
put_page(page);
|
|
}
|
|
}
|
|
|
|
static int ksmd_should_run(void)
|
|
{
|
|
return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
|
|
}
|
|
|
|
static int ksm_scan_thread(void *nothing)
|
|
{
|
|
set_freezable();
|
|
set_user_nice(current, 5);
|
|
|
|
while (!kthread_should_stop()) {
|
|
mutex_lock(&ksm_thread_mutex);
|
|
if (ksmd_should_run())
|
|
ksm_do_scan(ksm_thread_pages_to_scan);
|
|
mutex_unlock(&ksm_thread_mutex);
|
|
|
|
try_to_freeze();
|
|
|
|
if (ksmd_should_run()) {
|
|
schedule_timeout_interruptible(
|
|
msecs_to_jiffies(ksm_thread_sleep_millisecs));
|
|
} else {
|
|
wait_event_freezable(ksm_thread_wait,
|
|
ksmd_should_run() || kthread_should_stop());
|
|
}
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
|
|
unsigned long end, int advice, unsigned long *vm_flags)
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
int err;
|
|
|
|
switch (advice) {
|
|
case MADV_MERGEABLE:
|
|
/*
|
|
* Be somewhat over-protective for now!
|
|
*/
|
|
if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
|
|
VM_PFNMAP | VM_IO | VM_DONTEXPAND |
|
|
VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
|
|
VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
|
|
return 0; /* just ignore the advice */
|
|
|
|
if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
|
|
err = __ksm_enter(mm);
|
|
if (err)
|
|
return err;
|
|
}
|
|
|
|
*vm_flags |= VM_MERGEABLE;
|
|
break;
|
|
|
|
case MADV_UNMERGEABLE:
|
|
if (!(*vm_flags & VM_MERGEABLE))
|
|
return 0; /* just ignore the advice */
|
|
|
|
if (vma->anon_vma) {
|
|
err = unmerge_ksm_pages(vma, start, end);
|
|
if (err)
|
|
return err;
|
|
}
|
|
|
|
*vm_flags &= ~VM_MERGEABLE;
|
|
break;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
int __ksm_enter(struct mm_struct *mm)
|
|
{
|
|
struct mm_slot *mm_slot;
|
|
int needs_wakeup;
|
|
|
|
mm_slot = alloc_mm_slot();
|
|
if (!mm_slot)
|
|
return -ENOMEM;
|
|
|
|
/* Check ksm_run too? Would need tighter locking */
|
|
needs_wakeup = list_empty(&ksm_mm_head.mm_list);
|
|
|
|
spin_lock(&ksm_mmlist_lock);
|
|
insert_to_mm_slots_hash(mm, mm_slot);
|
|
/*
|
|
* Insert just behind the scanning cursor, to let the area settle
|
|
* down a little; when fork is followed by immediate exec, we don't
|
|
* want ksmd to waste time setting up and tearing down an rmap_list.
|
|
*/
|
|
list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
|
|
set_bit(MMF_VM_MERGEABLE, &mm->flags);
|
|
atomic_inc(&mm->mm_count);
|
|
|
|
if (needs_wakeup)
|
|
wake_up_interruptible(&ksm_thread_wait);
|
|
|
|
return 0;
|
|
}
|
|
|
|
void __ksm_exit(struct mm_struct *mm)
|
|
{
|
|
struct mm_slot *mm_slot;
|
|
int easy_to_free = 0;
|
|
|
|
/*
|
|
* This process is exiting: if it's straightforward (as is the
|
|
* case when ksmd was never running), free mm_slot immediately.
|
|
* But if it's at the cursor or has rmap_items linked to it, use
|
|
* mmap_sem to synchronize with any break_cows before pagetables
|
|
* are freed, and leave the mm_slot on the list for ksmd to free.
|
|
* Beware: ksm may already have noticed it exiting and freed the slot.
|
|
*/
|
|
|
|
spin_lock(&ksm_mmlist_lock);
|
|
mm_slot = get_mm_slot(mm);
|
|
if (mm_slot && ksm_scan.mm_slot != mm_slot) {
|
|
if (!mm_slot->rmap_list) {
|
|
hlist_del(&mm_slot->link);
|
|
list_del(&mm_slot->mm_list);
|
|
easy_to_free = 1;
|
|
} else {
|
|
list_move(&mm_slot->mm_list,
|
|
&ksm_scan.mm_slot->mm_list);
|
|
}
|
|
}
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
|
|
if (easy_to_free) {
|
|
free_mm_slot(mm_slot);
|
|
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
|
|
mmdrop(mm);
|
|
} else if (mm_slot) {
|
|
down_write(&mm->mmap_sem);
|
|
up_write(&mm->mmap_sem);
|
|
}
|
|
}
|
|
|
|
struct page *ksm_does_need_to_copy(struct page *page,
|
|
struct vm_area_struct *vma, unsigned long address)
|
|
{
|
|
struct page *new_page;
|
|
|
|
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
|
|
if (new_page) {
|
|
copy_user_highpage(new_page, page, address, vma);
|
|
|
|
SetPageDirty(new_page);
|
|
__SetPageUptodate(new_page);
|
|
SetPageSwapBacked(new_page);
|
|
__set_page_locked(new_page);
|
|
|
|
if (page_evictable(new_page, vma))
|
|
lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
|
|
else
|
|
add_page_to_unevictable_list(new_page);
|
|
}
|
|
|
|
return new_page;
|
|
}
|
|
|
|
int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
|
|
unsigned long *vm_flags)
|
|
{
|
|
struct stable_node *stable_node;
|
|
struct rmap_item *rmap_item;
|
|
struct hlist_node *hlist;
|
|
unsigned int mapcount = page_mapcount(page);
|
|
int referenced = 0;
|
|
int search_new_forks = 0;
|
|
|
|
VM_BUG_ON(!PageKsm(page));
|
|
VM_BUG_ON(!PageLocked(page));
|
|
|
|
stable_node = page_stable_node(page);
|
|
if (!stable_node)
|
|
return 0;
|
|
again:
|
|
hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
|
|
struct anon_vma *anon_vma = rmap_item->anon_vma;
|
|
struct anon_vma_chain *vmac;
|
|
struct vm_area_struct *vma;
|
|
|
|
anon_vma_lock(anon_vma);
|
|
list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
|
|
vma = vmac->vma;
|
|
if (rmap_item->address < vma->vm_start ||
|
|
rmap_item->address >= vma->vm_end)
|
|
continue;
|
|
/*
|
|
* Initially we examine only the vma which covers this
|
|
* rmap_item; but later, if there is still work to do,
|
|
* we examine covering vmas in other mms: in case they
|
|
* were forked from the original since ksmd passed.
|
|
*/
|
|
if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
|
|
continue;
|
|
|
|
if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
|
|
continue;
|
|
|
|
referenced += page_referenced_one(page, vma,
|
|
rmap_item->address, &mapcount, vm_flags);
|
|
if (!search_new_forks || !mapcount)
|
|
break;
|
|
}
|
|
anon_vma_unlock(anon_vma);
|
|
if (!mapcount)
|
|
goto out;
|
|
}
|
|
if (!search_new_forks++)
|
|
goto again;
|
|
out:
|
|
return referenced;
|
|
}
|
|
|
|
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
|
|
{
|
|
struct stable_node *stable_node;
|
|
struct hlist_node *hlist;
|
|
struct rmap_item *rmap_item;
|
|
int ret = SWAP_AGAIN;
|
|
int search_new_forks = 0;
|
|
|
|
VM_BUG_ON(!PageKsm(page));
|
|
VM_BUG_ON(!PageLocked(page));
|
|
|
|
stable_node = page_stable_node(page);
|
|
if (!stable_node)
|
|
return SWAP_FAIL;
|
|
again:
|
|
hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
|
|
struct anon_vma *anon_vma = rmap_item->anon_vma;
|
|
struct anon_vma_chain *vmac;
|
|
struct vm_area_struct *vma;
|
|
|
|
anon_vma_lock(anon_vma);
|
|
list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
|
|
vma = vmac->vma;
|
|
if (rmap_item->address < vma->vm_start ||
|
|
rmap_item->address >= vma->vm_end)
|
|
continue;
|
|
/*
|
|
* Initially we examine only the vma which covers this
|
|
* rmap_item; but later, if there is still work to do,
|
|
* we examine covering vmas in other mms: in case they
|
|
* were forked from the original since ksmd passed.
|
|
*/
|
|
if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
|
|
continue;
|
|
|
|
ret = try_to_unmap_one(page, vma,
|
|
rmap_item->address, flags);
|
|
if (ret != SWAP_AGAIN || !page_mapped(page)) {
|
|
anon_vma_unlock(anon_vma);
|
|
goto out;
|
|
}
|
|
}
|
|
anon_vma_unlock(anon_vma);
|
|
}
|
|
if (!search_new_forks++)
|
|
goto again;
|
|
out:
|
|
return ret;
|
|
}
|
|
|
|
#ifdef CONFIG_MIGRATION
|
|
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
|
|
struct vm_area_struct *, unsigned long, void *), void *arg)
|
|
{
|
|
struct stable_node *stable_node;
|
|
struct hlist_node *hlist;
|
|
struct rmap_item *rmap_item;
|
|
int ret = SWAP_AGAIN;
|
|
int search_new_forks = 0;
|
|
|
|
VM_BUG_ON(!PageKsm(page));
|
|
VM_BUG_ON(!PageLocked(page));
|
|
|
|
stable_node = page_stable_node(page);
|
|
if (!stable_node)
|
|
return ret;
|
|
again:
|
|
hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
|
|
struct anon_vma *anon_vma = rmap_item->anon_vma;
|
|
struct anon_vma_chain *vmac;
|
|
struct vm_area_struct *vma;
|
|
|
|
anon_vma_lock(anon_vma);
|
|
list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
|
|
vma = vmac->vma;
|
|
if (rmap_item->address < vma->vm_start ||
|
|
rmap_item->address >= vma->vm_end)
|
|
continue;
|
|
/*
|
|
* Initially we examine only the vma which covers this
|
|
* rmap_item; but later, if there is still work to do,
|
|
* we examine covering vmas in other mms: in case they
|
|
* were forked from the original since ksmd passed.
|
|
*/
|
|
if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
|
|
continue;
|
|
|
|
ret = rmap_one(page, vma, rmap_item->address, arg);
|
|
if (ret != SWAP_AGAIN) {
|
|
anon_vma_unlock(anon_vma);
|
|
goto out;
|
|
}
|
|
}
|
|
anon_vma_unlock(anon_vma);
|
|
}
|
|
if (!search_new_forks++)
|
|
goto again;
|
|
out:
|
|
return ret;
|
|
}
|
|
|
|
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
	struct stable_node *stable_node;

	VM_BUG_ON(!PageLocked(oldpage));
	VM_BUG_ON(!PageLocked(newpage));
	VM_BUG_ON(newpage->mapping != oldpage->mapping);

	stable_node = page_stable_node(newpage);
	if (stable_node) {
		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
		stable_node->kpfn = page_to_pfn(newpage);
	}
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_HOTREMOVE
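/*
 * ksm_check_stable_tree - find any stable node whose KSM page still lies
 * in the pfn range [start_pfn, end_pfn) which is being offlined.
 */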
static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
						 unsigned long end_pfn)
{
	struct rb_node *node;

	for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
		struct stable_node *stable_node;

		stable_node = rb_entry(node, struct stable_node, node);
		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			return stable_node;
	}
	return NULL;
}

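/*
 * Memory hotremove notifier: hold off ksmd while memory is going offline,
 * then prune any stable nodes left pointing at offlined pages.
 */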
static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	struct stable_node *stable_node;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Keep it very simple for now: just lock out ksmd and
		 * MADV_UNMERGEABLE while any memory is going offline.
		 * mutex_lock_nested() is necessary because lockdep was alarmed
		 * that here we take ksm_thread_mutex inside notifier chain
		 * mutex, and later take notifier chain mutex inside
		 * ksm_thread_mutex to unlock it. But that's safe because both
		 * are inside mem_hotplug_mutex.
		 */
		mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree.
		 */
		while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
					mn->start_pfn + mn->nr_pages)) != NULL)
			remove_node_from_stable_tree(stable_node);
		/* fallthrough */

	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&ksm_thread_mutex);
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

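/*
 * Helpers to declare the kobj_attributes exported under /sys/kernel/mm/ksm/:
 * KSM_ATTR_RO for read-only statistics, KSM_ATTR for writable tunables.
 */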
#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

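/*
 * sleep_millisecs: how long ksmd sleeps between scan batches.
 */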
static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

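/*
 * pages_to_scan: how many pages ksmd scans in one batch before sleeping.
 */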
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

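/*
 * run: controls whether ksmd is running; see the comment in run_store()
 * below for the meaning of each KSM_RUN_* value.
 */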
static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = strict_strtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			int oom_score_adj;

			oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
			err = unmerge_and_remove_all_rmap_items();
			compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX,
							oom_score_adj);
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);

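/*
 * The remaining files are read-only statistics: how many KSM pages are
 * shared, how many users share them, how many candidate pages remain
 * unshared or too volatile to merge, and how many full scans ksmd has
 * completed.
 */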
static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

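/*
 * ksm_init - create KSM's slab caches, start the ksmd thread, and register
 * the sysfs attribute group and (if configured) the memory-hotplug notifier.
 */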
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/*
	 * Choose a high priority since the callback takes ksm_thread_mutex:
	 * later callbacks could only be taking locks which nest within that.
	 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
module_init(ksm_init)