mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-21 10:05:00 +08:00
c200d90049
Patch series "mm: kmemleak: store objects allocated with physical address separately and check when scan", v4. The kmemleak_*_phys() interface uses "min_low_pfn" and "max_low_pfn" to check addresses. But on some architectures, kmemleak_*_phys() is called before those two variables are initialized. The following steps will be taken: 1) Add OBJECT_PHYS flag and rbtree for the objects allocated with physical address 2) Store physical address in objects if allocated with OBJECT_PHYS 3) Check the boundary when scanning instead of in kmemleak_*_phys() This patch set will solve: https://lore.kernel.org/r/20220527032504.30341-1-yee.lee@mediatek.com https://lore.kernel.org/r/9dd08bb5-f39e-53d8-f88d-bec598a08c93@gmail.com v3: https://lore.kernel.org/r/20220609124950.1694394-1-patrick.wang.shcn@gmail.com v2: https://lore.kernel.org/r/20220603035415.1243913-1-patrick.wang.shcn@gmail.com v1: https://lore.kernel.org/r/20220531150823.1004101-1-patrick.wang.shcn@gmail.com This patch (of 4): Remove the unused kmemleak_not_leak_phys() function. And remove the min_count argument to the kmemleak_alloc_phys() function; assume it's 0. Link: https://lkml.kernel.org/r/20220611035551.1823303-1-patrick.wang.shcn@gmail.com Link: https://lkml.kernel.org/r/20220611035551.1823303-2-patrick.wang.shcn@gmail.com Signed-off-by: Patrick Wang <patrick.wang.shcn@gmail.com> Suggested-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Cc: Yee Lee <yee.lee@mediatek.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
122 lines
3.1 KiB
C
122 lines
3.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/linux/kmemleak.h
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 */
|
#ifndef __KMEMLEAK_H
|
|
#define __KMEMLEAK_H
|
|
|
|
#include <linux/slab.h>
|
|
#include <linux/vmalloc.h>
|
|
|
|
#ifdef CONFIG_DEBUG_KMEMLEAK

/* Early one-time initialisation of the kmemleak tracer. */
extern void kmemleak_init(void) __init;

/*
 * Object registration hooks: inform kmemleak of allocated/freed memory
 * ranges so they can be tracked and scanned for references.
 * min_count is the minimum number of in-memory references an object must
 * have before it is considered a potential leak.
 */
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				  gfp_t gfp) __ref;
extern void kmemleak_vmalloc(const struct vm_struct *area, size_t size,
			     gfp_t gfp) __ref;
extern void kmemleak_free(const void *ptr) __ref;
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_update_trace(const void *ptr) __ref;

/* Scanning-policy hooks: tune how (or whether) an object is reported. */
extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;

/*
 * Physical-address variants. Note that kmemleak_alloc_phys() takes no
 * min_count argument: it is assumed to be 0 (see the patch description
 * removing the parameter and the unused kmemleak_not_leak_phys()).
 */
extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
				gfp_t gfp) __ref;
extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
|
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
|
|
int min_count, slab_flags_t flags,
|
|
gfp_t gfp)
|
|
{
|
|
if (!(flags & SLAB_NOLEAKTRACE))
|
|
kmemleak_alloc(ptr, size, min_count, gfp);
|
|
}
|
|
|
|
/*
 * Unregister a slab object from kmemleak unless the cache opted out of
 * tracking via SLAB_NOLEAKTRACE (mirror of kmemleak_alloc_recursive()).
 */
static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
	if (flags & SLAB_NOLEAKTRACE)
		return;

	kmemleak_free(ptr);
}
|
|
|
|
/*
 * Set the pointed-to pointer to NULL.
 * NOTE(review): presumably used so a stale pointer value left in memory is
 * not counted as a live reference during the kmemleak scan — confirm with
 * callers (e.g. slab freelist handling).
 */
static inline void kmemleak_erase(void **ptr)
{
	*ptr = NULL;
}
|
|
|
|
#else

/*
 * CONFIG_DEBUG_KMEMLEAK disabled: every hook becomes an empty static
 * inline stub so callers can use the API unconditionally and the calls
 * compile away to nothing.
 */

static inline void kmemleak_init(void)
{
}
static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
				  gfp_t gfp)
{
}
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
					    int min_count, slab_flags_t flags,
					    gfp_t gfp)
{
}
static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
					 gfp_t gfp)
{
}
static inline void kmemleak_vmalloc(const struct vm_struct *area, size_t size,
				    gfp_t gfp)
{
}
static inline void kmemleak_free(const void *ptr)
{
}
static inline void kmemleak_free_part(const void *ptr, size_t size)
{
}
static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
}
static inline void kmemleak_free_percpu(const void __percpu *ptr)
{
}
static inline void kmemleak_update_trace(const void *ptr)
{
}
static inline void kmemleak_not_leak(const void *ptr)
{
}
static inline void kmemleak_ignore(const void *ptr)
{
}
static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
}
static inline void kmemleak_erase(void **ptr)
{
}
static inline void kmemleak_no_scan(const void *ptr)
{
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
				       gfp_t gfp)
{
}
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}
static inline void kmemleak_ignore_phys(phys_addr_t phys)
{
}

#endif /* CONFIG_DEBUG_KMEMLEAK */
|
|
|
|
#endif /* __KMEMLEAK_H */
|