memcg: move_lists on page not page_cgroup
Each caller of mem_cgroup_move_lists is having to use page_get_page_cgroup: it's more convenient if it acts upon the page itself not the page_cgroup; and in a later patch this becomes important to handle within memcontrol.c.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bd845e38c7
commit 427d5416f3
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -36,7 +36,7 @@ extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 extern void mem_cgroup_uncharge(struct page_cgroup *pc);
 extern void mem_cgroup_uncharge_page(struct page *page);
-extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active);
+extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -106,8 +106,7 @@ static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
 
-static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
-					bool active)
+static inline void mem_cgroup_move_lists(struct page *page, bool active)
 {
 }
 
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -407,11 +407,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+void mem_cgroup_move_lists(struct page *page, bool active)
 {
+	struct page_cgroup *pc;
 	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
+	pc = page_get_page_cgroup(page);
 	if (!pc)
 		return;
 
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -176,7 +176,7 @@ void activate_page(struct page *page)
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+		mem_cgroup_move_lists(page, true);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1128,7 +1128,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		ClearPageActive(page);
 
 		list_move(&page->lru, &zone->inactive_list);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), false);
+		mem_cgroup_move_lists(page, false);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
@@ -1156,8 +1156,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		VM_BUG_ON(!PageActive(page));
+
 		list_move(&page->lru, &zone->active_list);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+		mem_cgroup_move_lists(page, true);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
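For readers who want the calling-convention change in one self-contained place, here is a minimal stand-alone sketch of the pattern the diff adopts. It is not kernel code: struct page, struct page_cgroup and page_get_page_cgroup() below are toy stand-ins, and the LRU bookkeeping is reduced to a single flag. It only illustrates the point made in the commit message: the page -> page_cgroup lookup (and the NULL check for pages not charged to any cgroup) moves out of every caller and into mem_cgroup_move_lists() itself, so callers such as activate_page() and shrink_active_list() simply pass the struct page they already hold.

/*
 * Simplified stand-alone model of the interface change (not kernel code).
 * The types and helper here are toy stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_cgroup { bool on_active_list; };
struct page        { struct page_cgroup *pc; };	/* NULL if not charged */

static struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return page->pc;
}

/* New style: the callee does the page -> page_cgroup lookup itself. */
static void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);

	if (!pc)			/* page not charged to any cgroup */
		return;
	pc->on_active_list = active;
}

int main(void)
{
	struct page_cgroup pc = { .on_active_list = false };
	struct page charged   = { .pc = &pc };
	struct page uncharged = { .pc = NULL };

	/*
	 * Old style forced every caller to open-code the lookup:
	 *     mem_cgroup_move_lists(page_get_page_cgroup(&charged), true);
	 * New style: callers pass the page they already hold.
	 */
	mem_cgroup_move_lists(&charged, true);
	mem_cgroup_move_lists(&uncharged, true);	/* safe: no page_cgroup */

	printf("charged page on active list: %d\n", pc.on_active_list);
	return 0;
}

Centralizing the lookup also matters for the follow-up work the commit message mentions: once memcontrol.c is the only place that turns a page into its page_cgroup, later patches can change how that mapping is handled without touching the callers again.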