From e0f3c3f78da29b114e7c1c68019036559f715948 Mon Sep 17 00:00:00 2001
From: Gavin Shan
Date: Mon, 8 Oct 2012 16:29:26 -0700
Subject: [PATCH] mm/mmu_notifier: init notifier if necessary

While registering an MMU notifier, a new mmu_notifier_mm instance is
allocated and then freed again if the current mm_struct's
mmu_notifier_mm has already been initialized.  That causes some
overhead.  This patch eliminates it by deferring the allocation until,
with all the mm locks held, we know that no mmu_notifier_mm exists yet.

Signed-off-by: Gavin Shan
Signed-off-by: Wanpeng Li
Cc: Andrea Arcangeli
Cc: Avi Kivity
Cc: Hugh Dickins
Cc: Marcelo Tosatti
Cc: Xiao Guangrong
Cc: Sagi Grimberg
Cc: Haggai Eran
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/mmu_notifier.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 35ff447d8d14..947df83dccb0 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -207,22 +207,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	 */
 	BUG_ON(!srcu.per_cpu_ref);
 
-	ret = -ENOMEM;
-	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-	if (unlikely(!mmu_notifier_mm))
-		goto out;
-
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out_cleanup;
+		goto out;
 
 	if (!mm_has_notifiers(mm)) {
+		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
+					  GFP_KERNEL);
+		if (unlikely(!mmu_notifier_mm)) {
+			ret = -ENOMEM;
+			goto out_of_mem;
+		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
+
 		mm->mmu_notifier_mm = mmu_notifier_mm;
-		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
 
@@ -238,13 +239,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
+out_of_mem:
 	mm_drop_all_locks(mm);
-out_cleanup:
+out:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
-	/* kfree() does nothing if mmu_notifier_mm is NULL */
-	kfree(mmu_notifier_mm);
-out:
+
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
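
For context, a minimal userspace sketch of the allocation pattern before
and after this change (illustrative only: plain malloc()/free() and the
hypothetical names ctx/notifier_state stand in for the kernel's
mm_struct/mmu_notifier_mm and kmalloc()/kfree(); no locking is modeled):

	/* alloc_pattern.c -- illustrative sketch, not kernel code. */
	#include <stdio.h>
	#include <stdlib.h>

	struct notifier_state {
		int initialized;		/* stands in for list head + lock */
	};

	struct ctx {
		struct notifier_state *state;	/* NULL until first registration */
	};

	/* Before the patch: allocate eagerly, free again if already present. */
	static int register_eager(struct ctx *c)
	{
		struct notifier_state *s = malloc(sizeof(*s));

		if (!s)
			return -1;
		if (!c->state) {
			s->initialized = 1;
			c->state = s;
			s = NULL;
		}
		free(s);	/* wasted malloc()/free() pair when state existed */
		return 0;
	}

	/* After the patch: allocate only once we know none exists yet. */
	static int register_lazy(struct ctx *c)
	{
		if (!c->state) {
			struct notifier_state *s = malloc(sizeof(*s));

			if (!s)
				return -1;
			s->initialized = 1;
			c->state = s;
		}
		return 0;
	}

	int main(void)
	{
		struct ctx a = { .state = NULL };
		struct ctx b = { .state = NULL };

		register_eager(&a);	/* allocates */
		register_eager(&a);	/* allocates, then immediately frees */
		register_lazy(&b);	/* allocates */
		register_lazy(&b);	/* allocates nothing */

		printf("a: %d, b: %d\n", a.state->initialized, b.state->initialized);
		free(a.state);
		free(b.state);
		return 0;
	}

The only behavioural difference is on the already-initialized path: the
eager variant pays for a malloc()/free() pair that the lazy variant never
issues, which is the overhead the commit message refers to.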