memcg: remove redundant function calls

__mem_cgroup_remove_list()/__mem_cgroup_add_list() call page_cgroup_zoneinfo()
internally, so the lookup ends up being done twice: once by the caller before
taking the lock, and again inside the helper after the lock is held.

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	mz = page_cgroup_zoneinfo(pc);	/* recomputed inside the helper */
	....
	spin_unlock_irqrestore(&mz->lru_lock, flags);

The address of mz never changes for a given page_cgroup, so the second call is
redundant.  This patch passes the already-known mz to the helpers and removes
the duplicated call.
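With the helpers taking mz directly, the call sites (see the hunks below) do
the lookup exactly once, along the lines of:

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);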

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -275,10 +275,10 @@ static void unlock_page_cgroup(struct page *page)
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+					struct page_cgroup *pc)
 {
 	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (from)
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
@@ -289,10 +289,10 @@ static void __mem_cgroup_remove_list(struct page_cgroup *pc)
 	list_del_init(&pc->lru);
 }
 
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+					struct page_cgroup *pc)
 {
 	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (!to) {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
@@ -618,7 +618,7 @@ retry:
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(page);
@@ -674,7 +674,7 @@ void mem_cgroup_uncharge_page(struct page *page)
 	if (--(pc->ref_cnt) == 0) {
 		mz = page_cgroup_zoneinfo(pc);
 		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_remove_list(pc);
+		__mem_cgroup_remove_list(mz, pc);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 		page_assign_page_cgroup(page, NULL);
@@ -736,7 +736,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(pc);
+	__mem_cgroup_remove_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	page_assign_page_cgroup(page, NULL);
@@ -748,7 +748,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(newpage);