memblock: debug enhancements
Improve tracking of early memory allocations when memblock debug is
enabled:

 * Add memblock_dbg() to memblock_phys_alloc_range() to get details
   about its usage
 * Make memblock allocator wrappers actually inline to track their
   callers in memblock debug messages

-----BEGIN PGP SIGNATURE-----

iQFHBAABCAAxFiEEeOVYVaWZL5900a/pOQOGJssO/ZEFAl/XYXUTHHJwcHRAbGlu
dXguaWJtLmNvbQAKCRA5A4Ymyw79kZfCB/0fIIVVoj6RGSjD37j0r8OecI6x5qnE
CJ23+LrXpTOSUjdYXrDhc1/tUwqJaH870vT6mX4bxppYGurvPOIeG2MeVp4973Gr
SXLGbwLYCz2XiG7n0D4uFMFIHG/4qN7GhzOJr7s1Bj1sckvOoT58fJtwV2HeJgcG
VNUD5WXBnAgs5FPAvqO6xsg/RmUgGnkgLTqpmEM4uRA/XFau0ZYd2pihEo46U1hm
dLVst4sJjurBRV7mdNjzCNphdMchXYvE6iFywxogaxG+z26w12ctQ94TcRyMXhbi
8aQ/JO3P5hvenD7Z1y2d9QLaoHtyLUf1dnSY/MNb5yn9SkoBRjV1/CLL
=OMbF
-----END PGP SIGNATURE-----

Merge tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:
 "memblock debug enhancements.

  Improve tracking of early memory allocations when memblock debug is
  enabled:

   - Add memblock_dbg() to memblock_phys_alloc_range() to get details
     about its usage

   - Make memblock allocator wrappers actually inline to track their
     callers in memblock debug messages"

* tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  mm: memblock: drop __init from memblock functions to make it inline
  mm: memblock: add more debug logs
commit fff875a183
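Why "actually inline" matters for the debug messages mentioned above: the common allocation path logs its caller via _RET_IP_, so if a wrapper such as memblock_alloc() ends up as a real out-of-line function, the debug line names the wrapper rather than the code that requested the memory. The standalone userspace sketch below illustrates the effect; it is not kernel code, and core_alloc()/alloc_wrapper_*() are made-up names used only for the demonstration.

/* Standalone sketch (userspace, GCC/Clang): how inlining changes the
 * return address seen by a logging allocator. */
#include <stdio.h>
#include <stdlib.h>

/* Core allocator: logs the address it will return to, i.e. its direct
 * caller (the kernel's common memblock path does something similar
 * with _RET_IP_). */
static void *core_alloc(size_t size)
{
	printf("core_alloc: %zu bytes, caller=%p\n",
	       size, __builtin_return_address(0));
	return malloc(size);
}

/* Kept out of line on purpose: the logged caller is this wrapper,
 * not the function that actually wanted the memory. */
static __attribute__((noinline)) void *alloc_wrapper_outline(size_t size)
{
	return core_alloc(size);
}

/* Forced inline: the wrapper body is expanded into its caller,
 * so core_alloc() reports the real caller. */
static inline __attribute__((always_inline)) void *alloc_wrapper_inline(size_t size)
{
	return core_alloc(size);
}

int main(void)
{
	printf("main() lives around %p\n", (void *)main);
	free(alloc_wrapper_outline(32));	/* caller printed as the wrapper */
	free(alloc_wrapper_inline(32));		/* caller printed as main() */
	return 0;
}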
include/linux/memblock.h
@@ -404,13 +404,13 @@ void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
 			      phys_addr_t min_addr, phys_addr_t max_addr,
 			      int nid);
 
-static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_raw(phys_addr_t size,
+static inline void *memblock_alloc_raw(phys_addr_t size,
 					       phys_addr_t align)
 {
 	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
@@ -418,7 +418,7 @@ static inline void * __init memblock_alloc_raw(phys_addr_t size,
 					  NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_from(phys_addr_t size,
+static inline void *memblock_alloc_from(phys_addr_t size,
 					phys_addr_t align,
 					phys_addr_t min_addr)
 {
@@ -426,33 +426,33 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_low(phys_addr_t size,
+static inline void *memblock_alloc_low(phys_addr_t size,
 					       phys_addr_t align)
 {
 	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_node(phys_addr_t size,
+static inline void *memblock_alloc_node(phys_addr_t size,
 						phys_addr_t align, int nid)
 {
 	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
-static inline void __init memblock_free_early(phys_addr_t base,
+static inline void memblock_free_early(phys_addr_t base,
 					      phys_addr_t size)
 {
 	memblock_free(base, size);
 }
 
-static inline void __init memblock_free_early_nid(phys_addr_t base,
+static inline void memblock_free_early_nid(phys_addr_t base,
 						  phys_addr_t size, int nid)
 {
 	memblock_free(base, size);
 }
 
-static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
 {
 	__memblock_free_late(base, size);
 }
@@ -460,7 +460,7 @@ static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline void __init memblock_set_bottom_up(bool enable)
+static inline void memblock_set_bottom_up(bool enable)
 {
 	memblock.bottom_up = enable;
 }
 

mm/memblock.c
@@ -1419,6 +1419,9 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 					     phys_addr_t start,
 					     phys_addr_t end)
 {
+	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
+		     __func__, (u64)size, (u64)align, &start, &end,
+		     (void *)_RET_IP_);
 	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
 					false);
 }
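As for the new memblock_dbg() call above: memblock debug output is only produced when memblock debugging is turned on (the memblock=debug boot parameter), so the extra line costs essentially nothing otherwise. A minimal userspace analogue of such a flag-gated debug macro, with made-up names and not the kernel's actual definition, might look like this:

/* Minimal analogue of a flag-gated debug macro; not the kernel's definition. */
#include <stdio.h>
#include <stdbool.h>

static bool dbg_enabled;	/* in the kernel the flag comes from the command line */

#define alloc_dbg(fmt, ...)						\
	do {								\
		if (dbg_enabled)					\
			fprintf(stderr, fmt, ##__VA_ARGS__);		\
	} while (0)

static unsigned long long phys_alloc_range(unsigned long long size,
					    unsigned long long align)
{
	/* mirrors the shape of the added memblock_dbg() call */
	alloc_dbg("%s: %llu bytes align=0x%llx\n", __func__, size, align);
	return 0;	/* the allocation itself is out of scope for this sketch */
}

int main(int argc, char **argv)
{
	(void)argv;
	dbg_enabled = (argc > 1);	/* pass any argument to enable logging */
	phys_alloc_range(16384, 0x1000);
	return 0;
}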