mm: fix xyz_noprof functions calling profiled functions

Grepping /proc/allocinfo for "noprof" reveals several xyz_noprof
functions, which means they are internally calling profiled functions.
This should never happen, because such calls move the allocation charge
from the higher-level location where it should be accounted into these
lower-level helpers.  Fix this by replacing the profiled function calls
with their noprof counterparts.
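
For illustration, a minimal userspace sketch of the pattern follows; it is
not the kernel's actual alloc_hooks machinery, and the names my_alloc,
helper and buggy_helper_noprof are made up.  The profiled entry point is a
macro that expands at the call site, so a _noprof helper that calls the
profiled entry point charges the allocation to its own line instead of to
the outer caller:

	#include <stdio.h>
	#include <stdlib.h>

	/* _noprof variant: allocates without recording any call site. */
	static void *my_alloc_noprof(size_t size)
	{
		return malloc(size);
	}

	/*
	 * Profiled entry point: a macro, so __FILE__/__LINE__ expand at
	 * the caller and the allocation is "charged" to that call site.
	 */
	#define my_alloc(size)						\
	({								\
		printf("charged to %s:%d\n", __FILE__, __LINE__);	\
		my_alloc_noprof(size);					\
	})

	/* Correct helper: uses only _noprof calls internally, so the
	 * charge stays with whoever invoked the helper's profiled
	 * wrapper. */
	static void *helper_noprof(size_t size)
	{
		return my_alloc_noprof(size);
	}
	#define helper(size)						\
	({								\
		printf("charged to %s:%d\n", __FILE__, __LINE__);	\
		helper_noprof(size);					\
	})

	/* Buggy helper: calling the profiled my_alloc() charges the
	 * allocation to this line inside the helper rather than to the
	 * outer caller -- the class of bug fixed below. */
	static void *buggy_helper_noprof(size_t size)
	{
		return my_alloc(size);
	}

	int main(void)
	{
		free(helper(32));		/* charged to this line in main() */
		free(buggy_helper_noprof(32));	/* charged inside the helper */
		return 0;
	}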

Link: https://lkml.kernel.org/r/20240531205350.3973009-1-surenb@google.com
Fixes: b951aaff50 ("mm: enable page allocation tagging")
Fixes: e26d8769da ("mempool: hook up to memory allocation profiling")
Fixes: 88ae5fb755 ("mm: vmalloc: enable memory allocation profiling")
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Reviewed-by: Kees Cook <kees@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/filemap.c |  2 +-
 mm/mempool.c |  2 +-
 mm/util.c    | 10 +++++-----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1000,7 +1000,7 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
 		do {
 			cpuset_mems_cookie = read_mems_allowed_begin();
 			n = cpuset_mem_spread_node();
-			folio = __folio_alloc_node(gfp, order, n);
+			folio = __folio_alloc_node_noprof(gfp, order, n);
 		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
 
 		return folio;

diff --git a/mm/mempool.c b/mm/mempool.c
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -273,7 +273,7 @@ mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
 {
 	mempool_t *pool;
 
-	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
+	pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
 

diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -705,7 +705,7 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 
 	if (oldsize >= newsize)
 		return (void *)p;
-	newp = kvmalloc(newsize, flags);
+	newp = kvmalloc_noprof(newsize, flags);
 	if (!newp)
 		return NULL;
 	memcpy(newp, p, oldsize);
@@ -726,7 +726,7 @@ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
-	return __vmalloc(bytes, flags);
+	return __vmalloc_noprof(bytes, flags);
 }
 EXPORT_SYMBOL(__vmalloc_array_noprof);
 
@@ -737,7 +737,7 @@ EXPORT_SYMBOL(__vmalloc_array_noprof);
  */
 void *vmalloc_array_noprof(size_t n, size_t size)
 {
-	return __vmalloc_array(n, size, GFP_KERNEL);
+	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_array_noprof);
 
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(vmalloc_array_noprof);
  */
 void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
 {
-	return __vmalloc_array(n, size, flags | __GFP_ZERO);
+	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
 }
 EXPORT_SYMBOL(__vcalloc_noprof);
 
@@ -760,7 +760,7 @@ EXPORT_SYMBOL(__vcalloc_noprof);
  */
 void *vcalloc_noprof(size_t n, size_t size)
 {
-	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
+	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vcalloc_noprof);