mm/vmalloc: improve allocation failure error messages

There are several reasons why a vmalloc can fail: virtual space exhausted, page array allocation failure, page allocation failure, and kernel page table allocation failure. Add distinct warning messages for the main causes of failure, with some added information like page order or allocation size where applicable.

[urezki@gmail.com: print correct vmalloc allocation size]
  Link: https://lkml.kernel.org/r/20210329193214.GA28602@pc638.lan

Link: https://lkml.kernel.org/r/20210322021806.892164-6-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4ad0ae8c64
commit d70bec8cc9

 mm/vmalloc.c | 40
 1 file changed, 27 insertions(+), 13 deletions(-)
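As a concrete illustration of the message change described above (not part of the commit): the sketch below is a minimal, hypothetical test module that requests one page more than totalram_pages() worth of memory. On a kernel with this patch applied, __vmalloc_node_range() should then print the new "exceeds total pages" warning instead of the old generic "vmalloc: allocation failure" text. The module and function names are made up for illustration; only vmalloc(), vfree(), totalram_pages() and the standard module macros are real kernel APIs.

/* Hypothetical demo module -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>           /* totalram_pages() */
#include <linux/vmalloc.h>

static int __init vmalloc_warn_demo_init(void)
{
        /*
         * One page more than all of RAM (assumes 64-bit; ignores the
         * shift overflow that could occur on 32-bit). The request fails
         * the (size >> PAGE_SHIFT) > totalram_pages() check below, so
         * vmalloc() returns NULL.
         */
        unsigned long size = (totalram_pages() + 1) << PAGE_SHIFT;
        void *p = vmalloc(size);

        if (!p)
                pr_info("vmalloc(%lu) failed as expected\n", size);
        else
                vfree(p);       /* should not happen */

        return 0;
}

static void __exit vmalloc_warn_demo_exit(void)
{
}

module_init(vmalloc_warn_demo_init);
module_exit(vmalloc_warn_demo_exit);
MODULE_LICENSE("GPL");

With the patch, dmesg should contain a warn_alloc() report whose text follows the new format string, "vmalloc size %lu allocation failure: exceeds total pages", with the requested size filled in; before the patch the same failure produced only the generic "vmalloc: allocation failure: %lu bytes" message.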
@@ -2790,6 +2790,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	if (!pages) {
 		free_vm_area(area);
+		warn_alloc(gfp_mask, NULL,
+			   "vmalloc size %lu allocation failure: "
+			   "page array size %lu allocation failed",
+			   nr_small_pages * PAGE_SIZE, array_size);
 		return NULL;
 	}
 
@@ -2814,6 +2818,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			/* Successfully allocated i pages, free them in __vfree() */
 			area->nr_pages = i;
 			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
+			warn_alloc(gfp_mask, NULL,
+				   "vmalloc size %lu allocation failure: "
+				   "page order %u allocation failed",
+				   area->nr_pages * PAGE_SIZE, page_order);
 			goto fail;
 		}
 
@@ -2825,15 +2833,17 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	}
 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 
-	if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0)
+	if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) {
+		warn_alloc(gfp_mask, NULL,
+			   "vmalloc size %lu allocation failure: "
+			   "failed to map pages",
+			   area->nr_pages * PAGE_SIZE);
 		goto fail;
+	}
 
 	return area->addr;
 
 fail:
-	warn_alloc(gfp_mask, NULL,
-		   "vmalloc: allocation failure, allocated %ld of %ld bytes",
-		   (area->nr_pages*PAGE_SIZE), size);
 	__vfree(area->addr);
 	return NULL;
 }
@@ -2867,9 +2877,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	unsigned long real_align = align;
 	unsigned int shift = PAGE_SHIFT;
 
-	if (!size || (size >> PAGE_SHIFT) > totalram_pages()) {
-		area = NULL;
-		goto fail;
+	if (WARN_ON_ONCE(!size))
+		return NULL;
+
+	if ((size >> PAGE_SHIFT) > totalram_pages()) {
+		warn_alloc(gfp_mask, NULL,
+			   "vmalloc size %lu allocation failure: "
+			   "exceeds total pages", real_size);
+		return NULL;
 	}
 
 	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
@@ -2897,8 +2912,12 @@ again:
 	size = PAGE_ALIGN(size);
 	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
 				  vm_flags, start, end, node, gfp_mask, caller);
-	if (!area)
+	if (!area) {
+		warn_alloc(gfp_mask, NULL,
+			   "vmalloc size %lu allocation failure: "
+			   "vm_struct allocation failed", real_size);
 		goto fail;
+	}
 
 	addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
 	if (!addr)
@@ -2923,11 +2942,6 @@ fail:
 		goto again;
 	}
 
-	if (!area) {
-		/* Warn for area allocation, page allocations already warn */
-		warn_alloc(gfp_mask, NULL,
-			   "vmalloc: allocation failure: %lu bytes", real_size);
-	}
 	return NULL;
 }
 