From 7393dc45f6ed5d3aba43b06d49eb3b15f1318906 Mon Sep 17 00:00:00 2001 From: Joonyoung Shim Date: Mon, 30 Sep 2013 13:45:01 -0700 Subject: [PATCH 01/22] revert "mm/memory-hotplug: fix lowmem count overflow when offline pages" This reverts commit cea27eb2a202 ("mm/memory-hotplug: fix lowmem count overflow when offline pages"). The bug fixed by commit cea27eb2a202 has since been fixed in another way by commit 3dcc0571cd64 ("mm: correctly update zone->managed_pages"). That commit enhances memory_hotplug.c to adjust totalhigh_pages when hot-removing memory, for details please refer to: http://marc.info/?l=linux-mm&m=136957578620221&w=2 As a result, commit cea27eb2a202 currently causes duplicated decreasing of totalhigh_pages, thus the revert. Signed-off-by: Joonyoung Shim Reviewed-by: Wanpeng Li Cc: Jiang Liu Cc: KOSAKI Motohiro Cc: Bartlomiej Zolnierkiewicz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0ee638f76ebe..dd886fac451a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6366,10 +6366,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) list_del(&page->lru); rmv_page_order(page); zone->free_area[order].nr_free--; -#ifdef CONFIG_HIGHMEM - if (PageHighMem(page)) - totalhigh_pages -= 1 << order; -#endif for (i = 0; i < (1 << order); i++) SetPageReserved((page+i)); pfn += (1 << order); From 72023656961b8c81a168a7a6762d589339d0d7ec Mon Sep 17 00:00:00 2001 From: Dan Aloni Date: Mon, 30 Sep 2013 13:45:02 -0700 Subject: [PATCH 02/22] fs/binfmt_elf.c: prevent a coredump with a large vm_map_count from Oopsing A high setting of max_map_count, and a process core-dumping with a large enough vm_map_count, could result in an NT_FILE note not being written, and the kernel crashing immediately later because it has assumed otherwise. Reproduction of the oops-causing bug described here: https://lkml.org/lkml/2013/8/30/50 The issue originated in commit 2aa362c49c31 ("coredump: extend core dump note section to contain file names of mapped file") from Oct 4, 2012. This patch makes that section optional in that case. fill_files_note() should signify the error, and also let the info struct in elf_core_dump() be zero-initialized so that we can check for the optionally written note. [akpm@linux-foundation.org: avoid abusing E2BIG, remove a couple of not-really-needed local variables] [akpm@linux-foundation.org: fix sparse warning] Signed-off-by: Dan Aloni Cc: Al Viro Cc: Denys Vlasenko Reported-by: Martin MOKREJS Tested-by: Martin MOKREJS Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 100edcc5e312..4c94a79991bb 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, * long file_ofs * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
*/ -static void fill_files_note(struct memelfnote *note) +static int fill_files_note(struct memelfnote *note) { struct vm_area_struct *vma; unsigned count, size, names_ofs, remaining, n; @@ -1428,11 +1428,11 @@ static void fill_files_note(struct memelfnote *note) names_ofs = (2 + 3 * count) * sizeof(data[0]); alloc: if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ - goto err; + return -EINVAL; size = round_up(size, PAGE_SIZE); data = vmalloc(size); if (!data) - goto err; + return -ENOMEM; start_end_ofs = data + 2; name_base = name_curpos = ((char *)data) + names_ofs; @@ -1485,7 +1485,7 @@ static void fill_files_note(struct memelfnote *note) size = name_curpos - (char *)data; fill_note(note, "CORE", NT_FILE, size, data); - err: ; + return 0; } #ifdef CORE_DUMP_USE_REGSET @@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, fill_auxv_note(&info->auxv, current->mm); info->size += notesize(&info->auxv); - fill_files_note(&info->files); - info->size += notesize(&info->files); + if (fill_files_note(&info->files) == 0) + info->size += notesize(&info->files); return 1; } @@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_note_info *info, return 0; if (first && !writenote(&info->auxv, file, foffset)) return 0; - if (first && !writenote(&info->files, file, foffset)) + if (first && info->files.data && + !writenote(&info->files, file, foffset)) return 0; for (i = 1; i < info->thread_notes; ++i) @@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t) struct elf_note_info { struct memelfnote *notes; + struct memelfnote *notes_files; struct elf_prstatus *prstatus; /* NT_PRSTATUS */ struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ struct list_head thread_list; @@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo); fill_auxv_note(info->notes + 3, current->mm); - fill_files_note(info->notes + 4); + info->numnote = 4; - info->numnote = 5; + if (fill_files_note(info->notes + info->numnote) == 0) { + info->notes_files = info->notes + info->numnote; + info->numnote++; + } /* Try to dump the FPU. */ info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, @@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_note_info *info) kfree(list_entry(tmp, struct elf_thread_status, list)); } - /* Free data allocated by fill_files_note(): */ - vfree(info->notes[4].data); + /* Free data possibly allocated by fill_files_note(): */ + if (info->notes_files) + vfree(info->notes_files->data); kfree(info->prstatus); kfree(info->psinfo); @@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump_params *cprm) struct vm_area_struct *vma, *gate_vma; struct elfhdr *elf = NULL; loff_t offset = 0, dataoff, foffset; - struct elf_note_info info; + struct elf_note_info info = { }; struct elf_phdr *phdr4note = NULL; struct elf_shdr *shdr4extnum = NULL; Elf_Half e_phnum; From f6ea3adb70b20ae36277a1b0eaaf4da9f6479a28 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 30 Sep 2013 13:45:03 -0700 Subject: [PATCH 03/22] mm/compaction.c: periodically schedule when freeing pages We've been getting warnings about an excessive amount of time spent allocating pages for migration during memory compaction without scheduling. isolate_freepages_block() already periodically checks for contended locks or the need to schedule, but isolate_freepages() never does. 
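For orientation, here is a small, compilable user-space model of the pattern this patch applies: a long scan that yields the CPU on every iteration. maybe_resched(), suitable_target() and NR_PAGEBLOCKS are illustrative stand-ins rather than kernel symbols; the real fix is simply the cond_resched() call shown in the hunk below.

        #include <stdbool.h>
        #include <sched.h>
        #include <stdio.h>

        #define NR_PAGEBLOCKS 4096      /* illustrative zone size, not a kernel constant */

        /* Stand-in for the kernel's cond_resched(): give other runnable
         * tasks a chance so a long scan cannot hog the CPU. */
        static void maybe_resched(void)
        {
                sched_yield();
        }

        static bool suitable_target(long pfn)
        {
                return false;           /* model the worst case: nothing is ever found */
        }

        int main(void)
        {
                long pfn, found = 0;

                for (pfn = NR_PAGEBLOCKS - 1; pfn >= 0; pfn--) {
                        /* The scan may walk the whole zone without success, so
                         * yield on every iteration, exactly like the
                         * cond_resched() added to isolate_freepages(). */
                        maybe_resched();

                        if (suitable_target(pfn))
                                found++;
                }
                printf("scanned %d pageblocks, found %ld targets\n",
                       NR_PAGEBLOCKS, found);
                return 0;
        }
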
When a zone is massively long and no suitable targets can be found, this iteration can be quite expensive without ever doing cond_resched(). Check periodically for the need to reschedule while the compaction free scanner iterates. Signed-off-by: David Rientjes Reviewed-by: Rik van Riel Reviewed-by: Wanpeng Li Acked-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/compaction.c b/mm/compaction.c index c43789388cd8..b5326b141a25 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -677,6 +677,13 @@ static void isolate_freepages(struct zone *zone, pfn -= pageblock_nr_pages) { unsigned long isolated; + /* + * This can iterate a massively long zone without finding any + * suitable migration targets, so periodically check if we need + * to schedule. + */ + cond_resched(); + if (!pfn_valid(pfn)) continue; From 5e9d527591421ccdb16acb8c23662231135d8686 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Mon, 30 Sep 2013 13:45:04 -0700 Subject: [PATCH 04/22] ipc/sem.c: fix race in sem_lock() The exclusion of complex operations in sem_lock() is insufficient: after acquiring the per-semaphore lock, a simple op must first check that sem_perm.lock is not locked and only after that test check complex_count. The current code does it the other way around - and that creates a race. Details are below. The patch is a complete rewrite of sem_lock(), based in part on the code from Mike Galbraith. It removes all gotos and all loops and thus the risk of livelocks. I have tested the patch (together with the next one) on my i3 laptop and it didn't cause any problems. The bug is probably also present in 3.10 and 3.11, but for these kernels it might be simpler just to move the test of sma->complex_count after the spin_is_locked() test. Details of the bug: Assume: - sma->complex_count = 0. - Thread 1: semtimedop(complex op that must sleep) - Thread 2: semtimedop(simple op). Pseudo-Trace: Thread 1: sem_lock(): acquire sem_perm.lock Thread 1: sem_lock(): check for ongoing simple ops Nothing ongoing, thread 2 is still before sem_lock(). Thread 1: try_atomic_semop() <<< preempted. Thread 2: sem_lock(): static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, int nsops) { int locknum; again: if (nsops == 1 && !sma->complex_count) { struct sem *sem = sma->sem_base + sops->sem_num; /* Lock just the semaphore we are interested in. */ spin_lock(&sem->lock); /* * If sma->complex_count was set while we were spinning, * we may need to look at things we did not lock here. */ if (unlikely(sma->complex_count)) { spin_unlock(&sem->lock); goto lock_array; } <<<<<<<<< <<< complex_count is still 0. <<< <<< Here it is preempted <<<<<<<<< Thread 1: try_atomic_semop() returns, notices that it must sleep. Thread 1: increases sma->complex_count. Thread 1: drops sem_perm.lock Thread 2: /* * Another process is holding the global lock on the * sem_array; we cannot enter our critical section, * but have to wait for the global lock to be released. 
*/ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) { spin_unlock(&sem->lock); spin_unlock_wait(&sma->sem_perm.lock); goto again; } <<< sem_perm.lock already dropped, thus no "goto again;" locknum = sops->sem_num; Signed-off-by: Manfred Spraul Cc: Mike Galbraith Cc: Rik van Riel Cc: Davidlohr Bueso Cc: [3.10+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 132 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 83 insertions(+), 49 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 19c8b980d1fe..4a92c0447ad6 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -252,71 +252,105 @@ static void sem_rcu_free(struct rcu_head *head) ipc_rcu_free(head); } +/* + * Wait until all currently ongoing simple ops have completed. + * Caller must own sem_perm.lock. + * New simple ops cannot start, because simple ops first check + * that sem_perm.lock is free. + */ +static void sem_wait_array(struct sem_array *sma) +{ + int i; + struct sem *sem; + + for (i = 0; i < sma->sem_nsems; i++) { + sem = sma->sem_base + i; + spin_unlock_wait(&sem->lock); + } +} + /* * If the request contains only one semaphore operation, and there are * no complex transactions pending, lock only the semaphore involved. * Otherwise, lock the entire semaphore array, since we either have * multiple semaphores in our own semops, or we need to look at * semaphores from other pending complex operations. - * - * Carefully guard against sma->complex_count changing between zero - * and non-zero while we are spinning for the lock. The value of - * sma->complex_count cannot change while we are holding the lock, - * so sem_unlock should be fine. - * - * The global lock path checks that all the local locks have been released, - * checking each local lock once. This means that the local lock paths - * cannot start their critical sections while the global lock is held. */ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, int nsops) { - int locknum; - again: - if (nsops == 1 && !sma->complex_count) { - struct sem *sem = sma->sem_base + sops->sem_num; + struct sem *sem; - /* Lock just the semaphore we are interested in. */ + if (nsops != 1) { + /* Complex operation - acquire a full lock */ + ipc_lock_object(&sma->sem_perm); + + /* And wait until all simple ops that are processed + * right now have dropped their locks. + */ + sem_wait_array(sma); + return -1; + } + + /* + * Only one semaphore affected - try to optimize locking. + * The rules are: + * - optimized locking is possible if no complex operation + * is either enqueued or processed right now. + * - The test for enqueued complex ops is simple: + * sma->complex_count != 0 + * - Testing for complex ops that are processed right now is + * a bit more difficult. Complex ops acquire the full lock + * and first wait that the running simple ops have completed. + * (see above) + * Thus: If we own a simple lock and the global lock is free + * and complex_count is now 0, then it will stay 0 and + * thus just locking sem->lock is sufficient. + */ + sem = sma->sem_base + sops->sem_num; + + if (sma->complex_count == 0) { + /* + * It appears that no complex operation is around. + * Acquire the per-semaphore lock. + */ spin_lock(&sem->lock); - /* - * If sma->complex_count was set while we were spinning, - * we may need to look at things we did not lock here. 
- */ - if (unlikely(sma->complex_count)) { - spin_unlock(&sem->lock); - goto lock_array; - } + /* Then check that the global lock is free */ + if (!spin_is_locked(&sma->sem_perm.lock)) { + /* spin_is_locked() is not a memory barrier */ + smp_mb(); - /* - * Another process is holding the global lock on the - * sem_array; we cannot enter our critical section, - * but have to wait for the global lock to be released. - */ - if (unlikely(spin_is_locked(&sma->sem_perm.lock))) { - spin_unlock(&sem->lock); - spin_unlock_wait(&sma->sem_perm.lock); - goto again; + /* Now repeat the test of complex_count: + * It can't change anymore until we drop sem->lock. + * Thus: if is now 0, then it will stay 0. + */ + if (sma->complex_count == 0) { + /* fast path successful! */ + return sops->sem_num; + } } - - locknum = sops->sem_num; - } else { - int i; - /* - * Lock the semaphore array, and wait for all of the - * individual semaphore locks to go away. The code - * above ensures no new single-lock holders will enter - * their critical section while the array lock is held. - */ - lock_array: - ipc_lock_object(&sma->sem_perm); - for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; - spin_unlock_wait(&sem->lock); - } - locknum = -1; + spin_unlock(&sem->lock); + } + + /* slow path: acquire the full lock */ + ipc_lock_object(&sma->sem_perm); + + if (sma->complex_count == 0) { + /* False alarm: + * There is no complex operation, thus we can switch + * back to the fast path. + */ + spin_lock(&sem->lock); + ipc_unlock_object(&sma->sem_perm); + return sops->sem_num; + } else { + /* Not a false alarm, thus complete the sequence for a + * full lock. + */ + sem_wait_array(sma); + return -1; } - return locknum; } static inline void sem_unlock(struct sem_array *sma, int locknum) From 6d07b68ce16ae9535955ba2059dedba5309c3ca1 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Mon, 30 Sep 2013 13:45:06 -0700 Subject: [PATCH 05/22] ipc/sem.c: optimize sem_lock() Operations that need access to the whole array must guarantee that there are no simple operations ongoing. Right now this is achieved by spin_unlock_wait(sem->lock) on all semaphores. If complex_count is nonzero, then this spin_unlock_wait() is not necessary, because it was already performed in the past by the thread that increased complex_count and even though sem_perm.lock was dropped inbetween, no simple operation could have started, because simple operations cannot start when complex_count is non-zero. Signed-off-by: Manfred Spraul Cc: Mike Galbraith Cc: Rik van Riel Reviewed-by: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ipc/sem.c b/ipc/sem.c index 4a92c0447ad6..e20658d76bb5 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -257,12 +257,20 @@ static void sem_rcu_free(struct rcu_head *head) * Caller must own sem_perm.lock. * New simple ops cannot start, because simple ops first check * that sem_perm.lock is free. + * that a) sem_perm.lock is free and b) complex_count is 0. */ static void sem_wait_array(struct sem_array *sma) { int i; struct sem *sem; + if (sma->complex_count) { + /* The thread that increased sma->complex_count waited on + * all sem->lock locks. Thus we don't need to wait again. 
+ */ + return; + } + for (i = 0; i < sma->sem_nsems; i++) { sem = sma->sem_base + i; spin_unlock_wait(&sem->lock); From d8c633766ad88527f25d9f81a5c2f083d78a2b39 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Mon, 30 Sep 2013 13:45:07 -0700 Subject: [PATCH 06/22] ipc/sem.c: synchronize the proc interface The proc interface is not aware of sem_lock(), it instead calls ipc_lock_object() directly. This means that simple semop() operations can run in parallel with the proc interface. Right now, this is uncritical, because the implementation doesn't do anything that requires a proper synchronization. But it is dangerous and therefore should be fixed. Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Cc: Mike Galbraith Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ipc/sem.c b/ipc/sem.c index e20658d76bb5..cd6a733011a2 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -2103,6 +2103,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) struct sem_array *sma = it; time_t sem_otime; + /* + * The proc interface isn't aware of sem_lock(), it calls + * ipc_lock_object() directly (in sysvipc_find_ipc). + * In order to stay compatible with sem_lock(), we must wait until + * all simple semop() calls have left their critical regions. + */ + sem_wait_array(sma); + sem_otime = get_semotime(sma); return seq_printf(s, From 4c1c7be95c345cf2ad537a0c48e9aeadc7304527 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 30 Sep 2013 13:45:08 -0700 Subject: [PATCH 07/22] kernel/kmod.c: check for NULL in call_usermodehelper_exec() If /proc/sys/kernel/core_pattern contains only "|", a NULL pointer dereference happens upon core dump because argv_split("") returns argv[0] == NULL. This bug was once fixed by commit 264b83c07a84 ("usermodehelper: check subprocess_info->path != NULL") but was by error reintroduced by commit 7f57cfa4e2aa ("usermodehelper: kill the sub_info->path[0] check"). This bug seems to exist since 2.6.19 (the version which core dump to pipe was added). Depending on kernel version and config, some side effect might happen immediately after this oops (e.g. kernel panic with 2.6.32-358.18.1.el6). Signed-off-by: Tetsuo Handa Acked-by: Oleg Nesterov Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kmod.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/kmod.c b/kernel/kmod.c index fb326365b694..b086006c59e7 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) DECLARE_COMPLETION_ONSTACK(done); int retval = 0; + if (!sub_info->path) { + call_usermodehelper_freeinfo(sub_info); + return -EINVAL; + } helper_lock(); if (!khelper_wq || usermodehelper_disabled) { retval = -EBUSY; From 83b2944fd2532b92db099cb3ada12df32a05b368 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 30 Sep 2013 13:45:09 -0700 Subject: [PATCH 08/22] mm/bounce.c: fix a regression where MS_SNAP_STABLE (stable pages snapshotting) was ignored The "force" parameter in __blk_queue_bounce was being ignored, which means that stable page snapshots are not always happening (on ext3). This of course leads to DIF disks reporting checksum errors, so fix this regression. The regression was introduced in commit 6bc454d15004 ("bounce: Refactor __blk_queue_bounce to not use bi_io_vec") Reported-by: Mel Gorman Signed-off-by: Darrick J. 
Wong Cc: Kent Overstreet Cc: [3.10+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/bounce.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/bounce.c b/mm/bounce.c index c9f0a4339a7d..5a7d58fb883b 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, struct bio_vec *to, *from; unsigned i; + if (force) + goto bounce; bio_for_each_segment(from, *bio_orig, i) if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) goto bounce; From 675217fd9950a668a33f9ee3fac3df34fd5113d0 Mon Sep 17 00:00:00 2001 From: Weiping Pan Date: Mon, 30 Sep 2013 13:45:10 -0700 Subject: [PATCH 09/22] Documentation/kernel-parameters.txt: replace kernelcore with Movable Han Pingtian found a typo in Documentation/kernel-parameters.txt about "kernelcore=", that "kernelcore" should be replaced with "Movable" here. Signed-off-by: Weiping Pan Acked-by: Mel Gorman Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kernel-parameters.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 539a23631990..73d23fdc512b 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1357,7 +1357,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. pages. In the event, a node is too small to have both kernelcore and Movable pages, kernelcore pages will take priority and other nodes will have a larger number - of kernelcore pages. The Movable zone is used for the + of Movable pages. The Movable zone is used for the allocation of pages that may be reclaimed or moved by the page migration subsystem. This means that HugeTLB pages may not be allocated from this zone. From 7f42ec3941560f0902fe3671e36f2c20ffd3af0a Mon Sep 17 00:00:00 2001 From: Vyacheslav Dubeyko Date: Mon, 30 Sep 2013 13:45:12 -0700 Subject: [PATCH 10/22] nilfs2: fix issue with race condition of competition between segments for dirty blocks Many NILFS2 users have reported strange file system corruption (for example): NILFS: bad btree node (blocknr=185027): level = 0, flags = 0x0, nchildren = 768 NILFS error (device sda4): nilfs_bmap_last_key: broken bmap (inode number=11540) But such error messages are a consequence of a file system issue that takes place earlier. Fortunately, Jerome Poulin and Anton Eliasson reported another issue not so long ago. These reports describe a crash of the segctor thread: BUG: unable to handle kernel paging request at 0000000000004c83 IP: nilfs_end_page_io+0x12/0xd0 [nilfs2] Call Trace: nilfs_segctor_do_construct+0xf25/0x1b20 [nilfs2] nilfs_segctor_construct+0x17b/0x290 [nilfs2] nilfs_segctor_thread+0x122/0x3b0 [nilfs2] kthread+0xc0/0xd0 ret_from_fork+0x7c/0xb0 These two issues have one common cause. This cause can raise a third issue too: hanging of the segctor thread while eating 100% CPU. REPRODUCING PATH: One of the possible ways of reproducing the issue was described by Jerome Poulin: 1. init S to get to single user mode. 2. sysrq+E to make sure only my shell is running 3. start network-manager to get my wifi connection up 4. login as root and launch "screen" 5. cd /boot/log/nilfs which is an ext3 mount point and can log when NILFS dies. 6. lscp | xz -9e > lscp.txt.xz 7. mount my snapshot using mount -o cp=3360839,ro /dev/vgUbuntu/root /mnt/nilfs 8.
start a screen to dump /proc/kmsg to text file since rsyslog is killed 9. start a screen and launch strace -f -o find-cat.log -t find /mnt/nilfs -type f -exec cat {} > /dev/null \; 10. start a screen and launch strace -f -o apt-get.log -t apt-get update 11. launch the last command again as it did not crash the first time 12. apt-get crashes 13. ps aux > ps-aux-crashed.log 13. sysrq+W 14. sysrq+E wait for everything to terminate 15. sysrq+SUSB Simplified way of the issue reproducing is starting kernel compilation task and "apt-get update" in parallel. REPRODUCIBILITY: The issue is reproduced not stable [60% - 80%]. It is very important to have proper environment for the issue reproducing. The critical conditions for successful reproducing: (1) It should have big modified file by mmap() way. (2) This file should have the count of dirty blocks are greater that several segments in size (for example, two or three) from time to time during processing. (3) It should be intensive background activity of files modification in another thread. INVESTIGATION: First of all, it is possible to see that the reason of crash is not valid page address: NILFS [nilfs_segctor_complete_write]:2100 bh->b_count 0, bh->b_blocknr 13895680, bh->b_size 13897727, bh->b_page 0000000000001a82 NILFS [nilfs_segctor_complete_write]:2101 segbuf->sb_segnum 6783 Moreover, value of b_page (0x1a82) is 6786. This value looks like segment number. And b_blocknr with b_size values look like block numbers. So, buffer_head's pointer points on not proper address value. Detailed investigation of the issue is discovered such picture: [-----------------------------SEGMENT 6783-------------------------------] NILFS [nilfs_segctor_do_construct]:2310 nilfs_segctor_begin_construction NILFS [nilfs_segctor_do_construct]:2321 nilfs_segctor_collect NILFS [nilfs_segctor_do_construct]:2336 nilfs_segctor_assign NILFS [nilfs_segctor_do_construct]:2367 nilfs_segctor_update_segusage NILFS [nilfs_segctor_do_construct]:2371 nilfs_segctor_prepare_write NILFS [nilfs_segctor_do_construct]:2376 nilfs_add_checksums_on_logs NILFS [nilfs_segctor_do_construct]:2381 nilfs_segctor_write NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111149024, segbuf->sb_segnum 6783 [-----------------------------SEGMENT 6784-------------------------------] NILFS [nilfs_segctor_do_construct]:2310 nilfs_segctor_begin_construction NILFS [nilfs_segctor_do_construct]:2321 nilfs_segctor_collect NILFS [nilfs_lookup_dirty_data_buffers]:782 bh->b_count 1, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824 NILFS [nilfs_lookup_dirty_data_buffers]:783 bh->b_assoc_buffers.next ffff8802174a6798, bh->b_assoc_buffers.prev ffff880221cffee8 NILFS [nilfs_segctor_do_construct]:2336 nilfs_segctor_assign NILFS [nilfs_segctor_do_construct]:2367 nilfs_segctor_update_segusage NILFS [nilfs_segctor_do_construct]:2371 nilfs_segctor_prepare_write NILFS [nilfs_segctor_do_construct]:2376 nilfs_add_checksums_on_logs NILFS [nilfs_segctor_do_construct]:2381 nilfs_segctor_write NILFS [nilfs_segbuf_submit_bh]:575 bh->b_count 1, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824 NILFS [nilfs_segbuf_submit_bh]:576 segbuf->sb_segnum 6784 NILFS [nilfs_segbuf_submit_bh]:577 bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880218bcdf50 NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111150080, segbuf->sb_segnum 6784, segbuf->sb_nbio 0 [----------] ditto NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111164416, segbuf->sb_segnum 6784, 
segbuf->sb_nbio 15 [-----------------------------SEGMENT 6785-------------------------------] NILFS [nilfs_segctor_do_construct]:2310 nilfs_segctor_begin_construction NILFS [nilfs_segctor_do_construct]:2321 nilfs_segctor_collect NILFS [nilfs_lookup_dirty_data_buffers]:782 bh->b_count 2, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824 NILFS [nilfs_lookup_dirty_data_buffers]:783 bh->b_assoc_buffers.next ffff880219277e80, bh->b_assoc_buffers.prev ffff880221cffc88 NILFS [nilfs_segctor_do_construct]:2367 nilfs_segctor_update_segusage NILFS [nilfs_segctor_do_construct]:2371 nilfs_segctor_prepare_write NILFS [nilfs_segctor_do_construct]:2376 nilfs_add_checksums_on_logs NILFS [nilfs_segctor_do_construct]:2381 nilfs_segctor_write NILFS [nilfs_segbuf_submit_bh]:575 bh->b_count 2, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824 NILFS [nilfs_segbuf_submit_bh]:576 segbuf->sb_segnum 6785 NILFS [nilfs_segbuf_submit_bh]:577 bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880222cc7ee8 NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111165440, segbuf->sb_segnum 6785, segbuf->sb_nbio 0 [----------] ditto NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111177728, segbuf->sb_segnum 6785, segbuf->sb_nbio 12 NILFS [nilfs_segctor_do_construct]:2399 nilfs_segctor_wait NILFS [nilfs_segbuf_wait]:676 segbuf->sb_segnum 6783 NILFS [nilfs_segbuf_wait]:676 segbuf->sb_segnum 6784 NILFS [nilfs_segbuf_wait]:676 segbuf->sb_segnum 6785 NILFS [nilfs_segctor_complete_write]:2100 bh->b_count 0, bh->b_blocknr 13895680, bh->b_size 13897727, bh->b_page 0000000000001a82 BUG: unable to handle kernel paging request at 0000000000001a82 IP: [] nilfs_end_page_io+0x12/0xd0 [nilfs2] Usually, for every segment we collect dirty files in list. Then, dirty blocks are gathered for every dirty file, prepared for write and submitted by means of nilfs_segbuf_submit_bh() call. Finally, it takes place complete write phase after calling nilfs_end_bio_write() on the block layer. Buffers/pages are marked as not dirty on final phase and processed files removed from the list of dirty files. It is possible to see that we had three prepare_write and submit_bio phases before segbuf_wait and complete_write phase. Moreover, segments compete between each other for dirty blocks because on every iteration of segments processing dirty buffer_heads are added in several lists of payload_buffers: [SEGMENT 6784]: bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880218bcdf50 [SEGMENT 6785]: bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880222cc7ee8 The next pointer is the same but prev pointer has changed. It means that buffer_head has next pointer from one list but prev pointer from another. Such modification can be made several times. And, finally, it can be resulted in various issues: (1) segctor hanging, (2) segctor crashing, (3) file system metadata corruption. FIX: This patch adds: (1) setting of BH_Async_Write flag in nilfs_segctor_prepare_write() for every proccessed dirty block; (2) checking of BH_Async_Write flag in nilfs_lookup_dirty_data_buffers() and nilfs_lookup_dirty_node_buffers(); (3) clearing of BH_Async_Write flag in nilfs_segctor_complete_write(), nilfs_abort_logs(), nilfs_forget_buffer(), nilfs_clear_dirty_page(). 
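As a rough illustration of the exclusion scheme described in steps (1)-(3), here is a self-contained toy model: a buffer is claimed before submission, skipped by later collection passes while it is claimed, and released on completion. struct buffer, BUF_DIRTY and BUF_IN_FLIGHT are illustrative names only, standing in for buffer_head and its BH_* flags.

        #include <stdbool.h>
        #include <stdio.h>

        #define BUF_DIRTY     (1u << 0)
        #define BUF_IN_FLIGHT (1u << 1)   /* models BH_Async_Write */

        struct buffer { unsigned flags; };

        /* (2) collection skips buffers already claimed by an earlier segment */
        static bool collect_for_segment(const struct buffer *b)
        {
                return (b->flags & BUF_DIRTY) && !(b->flags & BUF_IN_FLIGHT);
        }

        /* (1) prepare_write claims the buffer before submitting it */
        static void prepare_write(struct buffer *b)
        {
                b->flags |= BUF_IN_FLIGHT;
        }

        /* (3) completion (or abort) releases the claim again */
        static void complete_write(struct buffer *b)
        {
                b->flags &= ~(BUF_IN_FLIGHT | BUF_DIRTY);
        }

        int main(void)
        {
                struct buffer b = { .flags = BUF_DIRTY };

                if (collect_for_segment(&b))      /* segment N takes the buffer */
                        prepare_write(&b);
                printf("next segment may also collect it: %s\n",
                       collect_for_segment(&b) ? "yes (bug)" : "no (fixed)");
                complete_write(&b);
                return 0;
        }
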
Reported-by: Jerome Poulin Reported-by: Anton Eliasson Cc: Paul Fertser Cc: ARAI Shun-ichi Cc: Piotr Szymaniak Cc: Juan Barry Manuel Canham Cc: Zahid Chowdhury Cc: Elmer Zhang Cc: Kenneth Langga Signed-off-by: Vyacheslav Dubeyko Acked-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nilfs2/page.c | 2 ++ fs/nilfs2/segment.c | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 0ba679866e50..da276640f776 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh) clear_buffer_nilfs_volatile(bh); clear_buffer_nilfs_checked(bh); clear_buffer_nilfs_redirected(bh); + clear_buffer_async_write(bh); clear_buffer_dirty(bh); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); @@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) "discard block %llu, size %zu", (u64)bh->b_blocknr, bh->b_size); } + clear_buffer_async_write(bh); clear_buffer_dirty(bh); clear_buffer_nilfs_volatile(bh); clear_buffer_nilfs_checked(bh); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index bd88a7461063..9f6b486b6c01 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, bh = head = page_buffers(page); do { - if (!buffer_dirty(bh)) + if (!buffer_dirty(bh) || buffer_async_write(bh)) continue; get_bh(bh); list_add_tail(&bh->b_assoc_buffers, listp); @@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode, for (i = 0; i < pagevec_count(&pvec); i++) { bh = head = page_buffers(pvec.pages[i]); do { - if (buffer_dirty(bh)) { + if (buffer_dirty(bh) && + !buffer_async_write(bh)) { get_bh(bh); list_add_tail(&bh->b_assoc_buffers, listp); @@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { + set_buffer_async_write(bh); if (bh->b_page != bd_page) { if (bd_page) { lock_page(bd_page); @@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { + set_buffer_async_write(bh); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { lock_page(bd_page); @@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err) list_for_each_entry(segbuf, logs, sb_list) { list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { + clear_buffer_async_write(bh); if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); @@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err) list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { + clear_buffer_async_write(bh); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { end_page_writeback(bd_page); @@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) b_assoc_buffers) { set_buffer_uptodate(bh); clear_buffer_dirty(bh); + clear_buffer_async_write(bh); if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); @@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) b_assoc_buffers) { set_buffer_uptodate(bh); clear_buffer_dirty(bh); + clear_buffer_async_write(bh); clear_buffer_delay(bh); clear_buffer_nilfs_volatile(bh); clear_buffer_nilfs_redirected(bh); From 2a156a6b52f45355d999b58b745eef93021cc81a 
Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 30 Sep 2013 13:45:13 -0700 Subject: [PATCH 11/22] include/asm-generic/vtime.h: avoid zero-length file patch(1) can't handle zero-length files - it appears to simply not create the file, so my powerpc build fails. Put something in here to make life easier. Cc: Hugh Dickins Cc: Frederic Weisbecker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/vtime.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h index e69de29bb2d1..b1a49677fe25 100644 --- a/include/asm-generic/vtime.h +++ b/include/asm-generic/vtime.h @@ -0,0 +1 @@ +/* no content, but patch(1) dislikes empty files */ From 0772dac1dc28ab5a67374303e58c0fe5bff15720 Mon Sep 17 00:00:00 2001 From: Felipe Pena Date: Mon, 30 Sep 2013 13:45:14 -0700 Subject: [PATCH 12/22] arch/parisc/mm/fault.c: fix uninitialized variable usage The FAULT_FLAG_WRITE flag has been set based on uninitialized variable. Fixes a regression added by commit 759496ba6407 ("arch: mm: pass userspace fault flag to generic fault handler") Signed-off-by: Felipe Pena Cc: Johannes Weiner Cc: Michal Hocko Cc: "James E.J. Bottomley" Cc: Helge Deller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/parisc/mm/fault.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index d10d27a720c0..00c0ed333a3d 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -182,6 +182,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, if (user_mode(regs)) flags |= FAULT_FLAG_USER; + + acc_type = parisc_acctyp(code, regs->iir); + if (acc_type & VM_WRITE) flags |= FAULT_FLAG_WRITE; retry: @@ -196,8 +199,6 @@ retry: good_area: - acc_type = parisc_acctyp(code,regs->iir); - if ((vma->vm_flags & acc_type) != acc_type) goto bad_area; From 117aad1e9e4d97448d1df3f84b08bd65811e6d6a Mon Sep 17 00:00:00 2001 From: Rafael Aquini Date: Mon, 30 Sep 2013 13:45:16 -0700 Subject: [PATCH 13/22] mm: avoid reinserting isolated balloon pages into LRU lists Isolated balloon pages can wrongly end up in LRU lists when migrate_pages() finishes its round without draining all the isolated page list. The same issue can happen when reclaim_clean_pages_from_list() tries to reclaim pages from an isolated page list, before migration, in the CMA path. 
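A compilable toy model of the putback decision this patch corrects is sketched below; struct toy_page and its fields are illustrative stand-ins, not the kernel's struct page or the balloon API.

        #include <stdbool.h>
        #include <stdio.h>

        struct toy_page {
                bool balloon;     /* page belongs to a memory balloon */
                int  refcount;    /* isolation raises this to >= 2 */
                bool mapped;
        };

        /* models isolated_balloon_page(): a balloon page that compaction has
         * already isolated (extra reference held, not mapped anywhere) */
        static bool isolated_balloon_page(const struct toy_page *p)
        {
                return p->balloon && !p->mapped && p->refcount >= 2;
        }

        static const char *putback(const struct toy_page *p)
        {
                if (isolated_balloon_page(p))
                        return "balloon list";   /* balloon_page_putback() */
                return "LRU list";                /* putback_lru_page() */
        }

        int main(void)
        {
                struct toy_page isolated = { .balloon = true, .refcount = 2, .mapped = false };
                struct toy_page ordinary = { .balloon = false, .refcount = 1, .mapped = true };

                printf("isolated balloon page goes back to: %s\n", putback(&isolated));
                printf("ordinary page goes back to: %s\n", putback(&ordinary));
                return 0;
        }
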
Such balloon page leak opens a race window against LRU lists shrinkers that leads us to the following kernel panic: BUG: unable to handle kernel NULL pointer dereference at 0000000000000028 IP: [] shrink_page_list+0x24e/0x897 PGD 3cda2067 PUD 3d713067 PMD 0 Oops: 0000 [#1] SMP CPU: 0 PID: 340 Comm: kswapd0 Not tainted 3.12.0-rc1-22626-g4367597 #87 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 RIP: shrink_page_list+0x24e/0x897 RSP: 0000:ffff88003da499b8 EFLAGS: 00010286 RAX: 0000000000000000 RBX: ffff88003e82bd60 RCX: 00000000000657d5 RDX: 0000000000000000 RSI: 000000000000031f RDI: ffff88003e82bd40 RBP: ffff88003da49ab0 R08: 0000000000000001 R09: 0000000081121a45 R10: ffffffff81121a45 R11: ffff88003c4a9a28 R12: ffff88003e82bd40 R13: ffff88003da0e800 R14: 0000000000000001 R15: ffff88003da49d58 FS: 0000000000000000(0000) GS:ffff88003fc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000067d9000 CR3: 000000003ace5000 CR4: 00000000000407b0 Call Trace: shrink_inactive_list+0x240/0x3de shrink_lruvec+0x3e0/0x566 __shrink_zone+0x94/0x178 shrink_zone+0x3a/0x82 balance_pgdat+0x32a/0x4c2 kswapd+0x2f0/0x372 kthread+0xa2/0xaa ret_from_fork+0x7c/0xb0 Code: 80 7d 8f 01 48 83 95 68 ff ff ff 00 4c 89 e7 e8 5a 7b 00 00 48 85 c0 49 89 c5 75 08 80 7d 8f 00 74 3e eb 31 48 8b 80 18 01 00 00 <48> 8b 74 0d 48 8b 78 30 be 02 00 00 00 ff d2 eb RIP [] shrink_page_list+0x24e/0x897 RSP CR2: 0000000000000028 ---[ end trace 703d2451af6ffbfd ]--- Kernel panic - not syncing: Fatal exception This patch fixes the issue, by assuring the proper tests are made at putback_movable_pages() & reclaim_clean_pages_from_list() to avoid isolated balloon pages being wrongly reinserted in LRU lists. [akpm@linux-foundation.org: clarify awkward comment text] Signed-off-by: Rafael Aquini Reported-by: Luiz Capitulino Tested-by: Luiz Capitulino Cc: Mel Gorman Cc: Rik van Riel Cc: Hugh Dickins Cc: Johannes Weiner Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/balloon_compaction.h | 25 +++++++++++++++++++++++++ mm/migrate.c | 2 +- mm/vmscan.c | 4 +++- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index f7f1d7169b11..089743ade734 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -158,6 +158,26 @@ static inline bool balloon_page_movable(struct page *page) return false; } +/* + * isolated_balloon_page - identify an isolated balloon page on private + * compaction/migration page lists. + * + * After a compaction thread isolates a balloon page for migration, it raises + * the page refcount to prevent concurrent compaction threads from re-isolating + * the same page. For that reason putback_movable_pages(), or other routines + * that need to identify isolated balloon pages on private pagelists, cannot + * rely on balloon_page_movable() to accomplish the task. + */ +static inline bool isolated_balloon_page(struct page *page) +{ + /* Already isolated balloon pages, by default, have a raised refcount */ + if (page_flags_cleared(page) && !page_mapped(page) && + page_count(page) >= 2) + return __is_movable_balloon_page(page); + + return false; +} + /* * balloon_page_insert - insert a page into the balloon's page list and make * the page->mapping assignment accordingly. 
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page) return false; } +static inline bool isolated_balloon_page(struct page *page) +{ + return false; +} + static inline bool balloon_page_isolate(struct page *page) { return false; diff --git a/mm/migrate.c b/mm/migrate.c index 9c8d5f59d30b..a26bccd44ccb 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l) list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); - if (unlikely(balloon_page_movable(page))) + if (unlikely(isolated_balloon_page(page))) balloon_page_putback(page); else putback_lru_page(page); diff --git a/mm/vmscan.c b/mm/vmscan.c index beb35778c69f..53f2f82f83ae 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -48,6 +48,7 @@ #include #include +#include #include "internal.h" @@ -1113,7 +1114,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, LIST_HEAD(clean_pages); list_for_each_entry_safe(page, next, page_list, lru) { - if (page_is_file_cache(page) && !PageDirty(page)) { + if (page_is_file_cache(page) && !PageDirty(page) && + !isolated_balloon_page(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } From eadb41ae82f802105c0601aa8a0a0e7595826497 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 30 Sep 2013 13:45:18 -0700 Subject: [PATCH 14/22] mm/mlock.c: prevent walking off the end of a pagetable in no-pmd configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function __munlock_pagevec_fill() introduced in commit 7a8010cd3627 ("mm: munlock: manual pte walk in fast path instead of follow_page_mask()") uses pmd_addr_end() for restricting its operation within current page table. This is insufficient on architectures/configurations where pmd is folded and pmd_addr_end() just returns the end of the full range to be walked. In this case, it allows pte++ to walk off the end of a page table resulting in unpredictable behaviour. This patch fixes the function by using pgd_addr_end() and pud_addr_end() before pmd_addr_end(), which will yield correct page table boundary on all configurations. This is similar to what existing page walkers do when walking each level of the page table. Additionaly, the patch clarifies a comment for get_locked_pte() call in the function. Signed-off-by: Vlastimil Babka Reported-by: Fengguang Wu Reviewed-by: Bob Liu Cc: Jörn Engel Cc: Mel Gorman Cc: Michel Lespinasse Cc: Hugh Dickins Cc: Rik van Riel Cc: Johannes Weiner Cc: Michal Hocko Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mlock.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/mm/mlock.c b/mm/mlock.c index 67ba6da7d0e3..d480cd6fc475 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -379,10 +379,14 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, /* * Initialize pte walk starting at the already pinned page where we - * are sure that there is a pte. + * are sure that there is a pte, as it was pinned under the same + * mmap_sem write op. 
*/ pte = get_locked_pte(vma->vm_mm, start, &ptl); - end = min(end, pmd_addr_end(start, end)); + /* Make sure we do not cross the page table boundary */ + end = pgd_addr_end(start, end); + end = pud_addr_end(start, end); + end = pmd_addr_end(start, end); /* The page next to the pinned page is the first we will try to get */ start += PAGE_SIZE; From 080506ad0aa9c9fbc7879cdd290d55624da08c60 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Mon, 30 Sep 2013 13:45:19 -0700 Subject: [PATCH 15/22] block: change config option name for cmdline partition parsing Recently commit bab55417b10c ("block: support embedded device command line partition") introduced CONFIG_CMDLINE_PARSER. However, that name is too generic and sounds like it enables/disables generic kernel boot arg processing, when it really is block specific. Before this option becomes a part of a full/final release, add the BLK_ prefix to it so that it is clear in absence of any other context that it is block specific. In addition, fix up the following less critical items: - help text was not really at all helpful. - index file for Documentation was not updated - add the new arg to Documentation/kernel-parameters.txt - clarify wording in source comments Signed-off-by: Paul Gortmaker Cc: Jens Axboe Cc: Cai Zhiyong Cc: Wei Yongjun Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/block/00-INDEX | 2 ++ Documentation/block/cmdline-partition.txt | 8 ++++---- Documentation/kernel-parameters.txt | 4 ++++ block/Kconfig | 9 +++++++-- block/Makefile | 2 +- block/partitions/Kconfig | 4 ++-- block/partitions/cmdline.c | 8 ++++---- 7 files changed, 24 insertions(+), 13 deletions(-) diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX index d18ecd827c40..929d9904f74b 100644 --- a/Documentation/block/00-INDEX +++ b/Documentation/block/00-INDEX @@ -6,6 +6,8 @@ capability.txt - Generic Block Device Capability (/sys/block//capability) cfq-iosched.txt - CFQ IO scheduler tunables +cmdline-partition.txt + - how to specify block device partitions on kernel command line data-integrity.txt - Block data integrity deadline-iosched.txt diff --git a/Documentation/block/cmdline-partition.txt b/Documentation/block/cmdline-partition.txt index 2bbf4cc40c3f..525b9f6d7fb4 100644 --- a/Documentation/block/cmdline-partition.txt +++ b/Documentation/block/cmdline-partition.txt @@ -1,9 +1,9 @@ -Embedded device command line partition +Embedded device command line partition parsing ===================================================================== -Read block device partition table from command line. -The partition used for fixed block device (eMMC) embedded device. -It is no MBR, save storage space. Bootloader can be easily accessed +Support for reading the block device partition table from the command line. +It is typically used for fixed block (eMMC) embedded devices. +It has no MBR, so saves storage space. Bootloader can be easily accessed by absolute address of data on the block device. Users can easily change the partition. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 73d23fdc512b..fcbb736d55fe 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -480,6 +480,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Format: ,, See header of drivers/net/hamradio/baycom_ser_hdx.c. + blkdevparts= Manual partition parsing of block device(s) for + embedded devices based on command line input. 
+ See Documentation/block/cmdline-partition.txt + boot_delay= Milliseconds to delay each printk during boot. Values larger than 10 seconds (10000) are changed to no delay (0). diff --git a/block/Kconfig b/block/Kconfig index 7f38e40fee08..2429515c05c2 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -99,11 +99,16 @@ config BLK_DEV_THROTTLING See Documentation/cgroups/blkio-controller.txt for more information. -config CMDLINE_PARSER +config BLK_CMDLINE_PARSER bool "Block device command line partition parser" default n ---help--- - Parsing command line, get the partitions information. + Enabling this option allows you to specify the partition layout from + the kernel boot args. This is typically of use for embedded devices + which don't otherwise have any standardized method for listing the + partitions on a block device. + + See Documentation/block/cmdline-partition.txt for more information. menu "Partition Types" diff --git a/block/Makefile b/block/Makefile index 4fa4be544ece..671a83d063a5 100644 --- a/block/Makefile +++ b/block/Makefile @@ -18,4 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o -obj-$(CONFIG_CMDLINE_PARSER) += cmdline-parser.o +obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig index 87a32086535d..9b29a996c311 100644 --- a/block/partitions/Kconfig +++ b/block/partitions/Kconfig @@ -263,7 +263,7 @@ config SYSV68_PARTITION config CMDLINE_PARTITION bool "Command line partition support" if PARTITION_ADVANCED - select CMDLINE_PARSER + select BLK_CMDLINE_PARSER help - Say Y here if you would read the partitions table from bootargs. + Say Y here if you want to read the partition table from bootargs. The format for the command line is just like mtdparts. diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c index 56cf4ffad51e..5141b563adf1 100644 --- a/block/partitions/cmdline.c +++ b/block/partitions/cmdline.c @@ -2,15 +2,15 @@ * Copyright (C) 2013 HUAWEI * Author: Cai Zhiyong * - * Read block device partition table from command line. - * The partition used for fixed block device (eMMC) embedded device. - * It is no MBR, save storage space. Bootloader can be easily accessed + * Read block device partition table from the command line. + * Typically used for fixed block (eMMC) embedded devices. + * It has no MBR, so saves storage space. Bootloader can be easily accessed * by absolute address of data on the block device. * Users can easily change the partition. * * The format for the command line is just like mtdparts. * - * Verbose config please reference "Documentation/block/cmdline-partition.txt" + * For further information, see "Documentation/block/cmdline-partition.txt" * */ From 20cb6cab52a21b46e3c0dc7bd23f004f810fb421 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 30 Sep 2013 13:45:21 -0700 Subject: [PATCH 16/22] mm/hwpoison: fix traversal of hugetlbfs pages to avoid printk flood madvise_hwpoison won't check if the page is small page or huge page and traverses in small page granularity against the range unconditionally, which result in a printk flood "MCE xxx: already hardware poisoned" if the page is a huge page. This patch fixes it by using compound_order(compound_head(page)) for huge page iterator. 
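A compilable toy illustration of the new iteration step follows; PAGE_SHIFT, HPAGE_ORDER and page_order() are stand-ins chosen for x86_64-style 2 MB huge pages, not the kernel's helpers.

        #include <stdio.h>

        #define PAGE_SHIFT  12                   /* 4k base pages */
        #define PAGE_SIZE   (1UL << PAGE_SHIFT)
        #define HPAGE_ORDER 9                    /* 2 MB huge pages on x86_64 */

        /* Toy stand-in for compound_order(compound_head(page)): pretend the
         * whole range is backed by huge pages of order HPAGE_ORDER. */
        static unsigned int page_order(unsigned long addr)
        {
                (void)addr;
                return HPAGE_ORDER;
        }

        int main(void)
        {
                unsigned long start = 0;
                unsigned long end = 3 * (PAGE_SIZE << HPAGE_ORDER);  /* three huge pages */
                int iterations = 0;

                /* Advance by the size of the page actually hit instead of
                 * always PAGE_SIZE; this stops the "already hardware
                 * poisoned" message from being printed once per 4k subpage. */
                for (; start < end; start += PAGE_SIZE << page_order(start))
                        iterations++;

                printf("iterations: %d (one per huge page, not %lu)\n",
                       iterations, end / PAGE_SIZE);
                return 0;
        }
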
Testcase: #define _GNU_SOURCE #include #include #include #include #include #include #include #define PAGES_TO_TEST 3 #define PAGE_SIZE 4096 * 512 int main(void) { char *mem; int i; mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0); if (madvise(mem, PAGES_TO_TEST * PAGE_SIZE, MADV_HWPOISON) == -1) return -1; munmap(mem, PAGES_TO_TEST * PAGE_SIZE); return 0; } Signed-off-by: Wanpeng Li Reviewed-by: Naoya Horiguchi Acked-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/madvise.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 6975bc812542..539eeb96b323 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma, */ static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end) { + struct page *p; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - for (; start < end; start += PAGE_SIZE) { - struct page *p; + for (; start < end; start += PAGE_SIZE << + compound_order(compound_head(p))) { int ret; ret = get_user_pages_fast(start, 1, 0, &p); From e76d30e20be5fca46c185a1470b045b89f95edcb Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 30 Sep 2013 13:45:22 -0700 Subject: [PATCH 17/22] mm/hwpoison: fix test for a transparent huge page PageTransHuge() can't guarantee the page is a transparent huge page since it returns true for both transparent huge and hugetlbfs pages. This patch fixes it by checking the page is also !hugetlbfs page. Before patch: [ 121.571128] Injecting memory failure at pfn 23a200 [ 121.571141] MCE 0x23a200: huge page recovery: Delayed [ 140.355100] MCE: Memory failure is now running on 0x23a200 After patch: [ 94.290793] Injecting memory failure at pfn 23a000 [ 94.290800] MCE 0x23a000: huge page recovery: Delayed [ 105.722303] MCE: Software-unpoisoned page 0x23a000 Signed-off-by: Wanpeng Li Reviewed-by: Naoya Horiguchi Acked-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 947ed5413279..3faba33c67cc 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1349,7 +1349,7 @@ int unpoison_memory(unsigned long pfn) * worked by memory_failure() and the page lock is not held yet. * In such case, we yield to memory_failure() and make unpoison fail. */ - if (PageTransHuge(page)) { + if (!PageHuge(page) && PageTransHuge(page)) { pr_info("MCE: Memory failure is now running on %#lx\n", pfn); return 0; } From 2d421acd1568cf2d83739a1afec8e18edb9e3d16 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 30 Sep 2013 13:45:23 -0700 Subject: [PATCH 18/22] mm/hwpoison: fix false report on 2nd attempt at page recovery If the page is poisoned by software injection w/ MF_COUNT_INCREASED flag, there is a false report during the 2nd attempt at page recovery which is not truthful. This patch fixes it by reporting the first attempt to try free buddy page recovery if MF_COUNT_INCREASED is set. 
Before patch: [ 346.332041] Injecting memory failure at pfn 200010 [ 346.332189] MCE 0x200010: free buddy, 2nd try page recovery: Delayed After patch: [ 297.742600] Injecting memory failure at pfn 200010 [ 297.742941] MCE 0x200010: free buddy page recovery: Delayed Reviewed-by: Naoya Horiguchi Acked-by: Andi Kleen Signed-off-by: Wanpeng Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 3faba33c67cc..bf3351b5115e 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1114,8 +1114,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) * shake_page could have turned it free. */ if (is_free_buddy_page(p)) { - action_result(pfn, "free buddy, 2nd try", - DELAYED); + if (flags & MF_COUNT_INCREASED) + action_result(pfn, "free buddy", DELAYED); + else + action_result(pfn, "free buddy, 2nd try", DELAYED); return 0; } action_result(pfn, "non LRU", IGNORED); From fb31ba30fb10fe0ee11739f51669d581b4a1412c Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 30 Sep 2013 13:45:24 -0700 Subject: [PATCH 19/22] mm/hwpoison: fix the lack of one reference count against poisoned page The lack of one reference count against poisoned page for hwpoison_inject w/o hwpoison_filter enabled result in hwpoison detect -1 users still referenced the page, however, the number should be 0 except the poison handler held one after successfully unmap. This patch fix it by hold one referenced count against poisoned page for hwpoison_inject w/ and w/o hwpoison_filter enabled. Before patch: [ 71.902112] Injecting memory failure at pfn 224706 [ 71.902137] MCE 0x224706: dirty LRU page recovery: Failed [ 71.902138] MCE 0x224706: dirty LRU page still referenced by -1 users After patch: [ 94.710860] Injecting memory failure at pfn 215b68 [ 94.710885] MCE 0x215b68: dirty LRU page recovery: Recovered Reviewed-by: Naoya Horiguchi Acked-by: Andi Kleen Signed-off-by: Wanpeng Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hwpoison-inject.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index afc2daa91c60..4c84678371eb 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!hwpoison_filter_enable) - goto inject; if (!pfn_valid(pfn)) return -ENXIO; @@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val) if (!get_page_unless_zero(hpage)) return 0; + if (!hwpoison_filter_enable) + goto inject; + if (!PageLRU(p) && !PageHuge(p)) shake_page(p, 0); /* From 0e8c665699e953fa58dc1b0b0d09e5dce7343cc7 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Mon, 30 Sep 2013 13:45:25 -0700 Subject: [PATCH 20/22] ipc/sem.c: update sem_otime for all operations In commit 0a2b9d4c7967 ("ipc/sem.c: move wake_up_process out of the spinlock section"), the update of semaphore's sem_otime(last semop time) was moved to one central position (do_smart_update). But since do_smart_update() is only called for operations that modify the array, this means that wait-for-zero semops do not update sem_otime anymore. The fix is simple: Non-alter operations must update sem_otime. 
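A minimal user-space check of the behaviour this patch restores is sketched below (a Linux system is assumed and error handling is kept to a minimum): after a successful wait-for-zero semop(), IPC_STAT should show a non-zero sem_otime.

        #include <stdio.h>
        #include <sys/types.h>
        #include <sys/ipc.h>
        #include <sys/sem.h>

        /* The caller must define union semun itself on Linux, see semctl(2). */
        union semun { int val; struct semid_ds *buf; unsigned short *array; };

        int main(void)
        {
                struct sembuf op = { .sem_num = 0, .sem_op = 0, .sem_flg = 0 };
                struct semid_ds ds;
                union semun arg = { .buf = &ds };
                int id = semget(IPC_PRIVATE, 1, 0600);

                if (id < 0)
                        return 1;
                /* Wait-for-zero is a non-alter operation.  Linux initialises
                 * semval to 0, so this returns immediately. */
                if (semop(id, &op, 1) < 0)
                        return 1;
                semctl(id, 0, IPC_STAT, arg);
                /* Before the fix sem_otime stayed 0 here; with it, the time is set. */
                printf("sem_otime after wait-for-zero semop: %ld\n", (long)ds.sem_otime);
                semctl(id, 0, IPC_RMID);
                return 0;
        }
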
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Manfred Spraul Reported-by: Jia He Tested-by: Jia He Cc: Davidlohr Bueso Cc: Mike Galbraith Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 42 +++++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index cd6a733011a2..8c4f59b0204a 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -917,6 +917,24 @@ again: return semop_completed; } +/** + * set_semotime(sma, sops) - set sem_otime + * @sma: semaphore array + * @sops: operations that modified the array, may be NULL + * + * sem_otime is replicated to avoid cache line trashing. + * This function sets one instance to the current time. + */ +static void set_semotime(struct sem_array *sma, struct sembuf *sops) +{ + if (sops == NULL) { + sma->sem_base[0].sem_otime = get_seconds(); + } else { + sma->sem_base[sops[0].sem_num].sem_otime = + get_seconds(); + } +} + /** * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue * @sma: semaphore array @@ -967,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop } } } - if (otime) { - if (sops == NULL) { - sma->sem_base[0].sem_otime = get_seconds(); - } else { - sma->sem_base[sops[0].sem_num].sem_otime = - get_seconds(); - } - } + if (otime) + set_semotime(sma, sops); } - /* The following counts are associated to each semaphore: * semncnt number of tasks waiting on semval being nonzero * semzcnt number of tasks waiting on semval being zero @@ -1839,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, error = perform_atomic_semop(sma, sops, nsops, un, task_tgid_vnr(current)); - if (error <= 0) { - if (alter && error == 0) + if (error == 0) { + /* If the operation was successful, then do + * the required updates. + */ + if (alter) do_smart_update(sma, sops, nsops, 1, &tasks); - - goto out_unlock_free; + else + set_semotime(sma, sops); } + if (error <= 0) + goto out_unlock_free; /* We need to sleep on this operation, so we put the current * task into the pending queue and go to sleep. From 4271b05a227dc6175b66c3d9941aeab09048aeb2 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Mon, 30 Sep 2013 13:45:26 -0700 Subject: [PATCH 21/22] ipc,msg: prevent race with rmid in msgsnd,msgrcv This fixes a race in both msgrcv() and msgsnd() between finding the msg and actually dealing with the queue, as another thread can delete shmid underneath us if we are preempted before acquiring the kern_ipc_perm.lock. Manfred illustrates this nicely: Assume a preemptible kernel that is preempted just after msq = msq_obtain_object_check(ns, msqid) in do_msgrcv(). The only lock that is held is rcu_read_lock(). Now the other thread processes IPC_RMID. When the first task is resumed, then it will happily wait for messages on a deleted queue. Fix this by checking for if the queue has been deleted after taking the lock. Signed-off-by: Davidlohr Bueso Reported-by: Manfred Spraul Cc: Rik van Riel Cc: Mike Galbraith Cc: [3.11] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/msg.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/ipc/msg.c b/ipc/msg.c index 9e4310c546ae..558aa91186b6 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -695,6 +695,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, if (ipcperms(ns, &msq->q_perm, S_IWUGO)) goto out_unlock0; + /* raced with RMID? 
*/ + if (msq->q_perm.deleted) { + err = -EIDRM; + goto out_unlock0; + } + err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) goto out_unlock0; @@ -901,6 +907,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl goto out_unlock1; ipc_lock_object(&msq->q_perm); + + /* raced with RMID? */ + if (msq->q_perm.deleted) { + msg = ERR_PTR(-EIDRM); + goto out_unlock0; + } + msg = find_msg(msq, &msgtyp, mode); if (!IS_ERR(msg)) { /* From 314a8ad0f18ac37887896b288939acd8cb17e208 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 30 Sep 2013 13:45:27 -0700 Subject: [PATCH 22/22] pidns: fix free_pid() to handle the first fork failure "case 0" in free_pid() assumes that disable_pid_allocation() should clear PIDNS_HASH_ADDING before the last pid goes away. However this doesn't happen if the first fork() fails to create the child reaper which should call disable_pid_allocation(). Signed-off-by: Oleg Nesterov Reviewed-by: "Eric W. Biederman" Cc: "Serge E. Hallyn" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/pid.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/pid.c b/kernel/pid.c index ebe5e80b10f8..9b9a26698144 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -273,6 +273,11 @@ void free_pid(struct pid *pid) */ wake_up_process(ns->child_reaper); break; + case PIDNS_HASH_ADDING: + /* Handle a fork failure of the first process */ + WARN_ON(ns->child_reaper); + ns->nr_hashed = 0; + /* fall through */ case 0: schedule_work(&ns->proc_work); break;