memblock: use separate iterators for memory and reserved regions
for_each_memblock() is used to iterate over memblock.memory in a few places that use data from memblock_region rather than the memory ranges.

Introduce separate for_each_mem_region() and for_each_reserved_mem_region() to improve encapsulation of memblock internals from its users.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Acked-by: Ingo Molnar <mingo@kernel.org> [x86]
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> [MIPS]
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> [.clang-format]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-18-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9f3d5eaa3c
commit cc6de16805
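To make the conversion pattern concrete before the diff, here is a minimal sketch (not part of the commit; do_something() is a hypothetical callback, r is a struct memblock_region pointer):

	struct memblock_region *r;

	/* Before: each call site spelled out which memblock type it walks. */
	for_each_memblock(memory, r)
		do_something(r->base, r->size);

	/* After: the iterator itself names the type, so callers no longer
	 * mention memblock internals such as memblock.memory.
	 */
	for_each_mem_region(r)
		do_something(r->base, r->size);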
@@ -203,7 +203,7 @@ ForEachMacros:
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
   - 'for_each_member'
-  - 'for_each_memblock'
+  - 'for_each_mem_region'
   - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
   - 'for_each_mem_pfn_range'
@@ -274,6 +274,7 @@ ForEachMacros:
   - 'for_each_requested_gpio'
   - 'for_each_requested_gpio_in_range'
   - 'for_each_reserved_mem_range'
+  - 'for_each_reserved_mem_region'
   - 'for_each_rtd_codec_dais'
   - 'for_each_rtd_codec_dais_rollback'
   - 'for_each_rtd_components'
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
 	if (!standard_resources)
 		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
-	for_each_memblock(memory, region) {
+	for_each_mem_region(region) {
 		res = &standard_resources[i++];
 		if (memblock_is_nomap(region)) {
 			res->name = "reserved";
@@ -354,7 +354,7 @@ static int __init numa_register_nodes(void)
 	struct memblock_region *mblk;
 
 	/* Check that valid nid is set to memblks */
-	for_each_memblock(memory, mblk) {
+	for_each_mem_region(mblk) {
 		int mblk_nid = memblock_get_region_node(mblk);
 
 		if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
@@ -70,7 +70,7 @@ static void nlm_fixup_mem(void)
 	const int pref_backup = 512;
 	struct memblock_region *mem;
 
-	for_each_memblock(memory, mem) {
+	for_each_mem_region(mem) {
 		memblock_remove(mem->base + mem->size - pref_backup,
 				pref_backup);
 	}
@@ -531,7 +531,7 @@ static void __init resource_init(void)
 {
 	struct memblock_region *region;
 
-	for_each_memblock(memory, region) {
+	for_each_mem_region(region) {
 		struct resource *res;
 
 		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
@@ -514,7 +514,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
 	 * memory ranges, because quirks such as trim_snb_memory()
 	 * reserve specific pages for Sandy Bridge graphics. ]
 	 */
-	for_each_memblock(reserved, mb_region) {
+	for_each_reserved_mem_region(mb_region) {
 		int nid = memblock_get_region_node(mb_region);
 
 		if (nid != MAX_NUMNODES)
@@ -553,9 +553,22 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 	return PFN_UP(reg->base + reg->size);
 }
 
-#define for_each_memblock(memblock_type, region)				\
-	for (region = memblock.memblock_type.regions;				\
-	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region)					\
+	for (region = memblock.memory.regions;				\
+	     region < (memblock.memory.regions + memblock.memory.cnt);	\
 	     region++)
 
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region)				\
+	for (region = memblock.reserved.regions;			\
+	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
+	     region++)
+
 extern void *alloc_large_system_hash(const char *tablename,
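As a rough usage sketch of the two new iterators (not part of the commit; dump_memblock_regions() is a hypothetical helper that assumes <linux/memblock.h> and <linux/printk.h> are included):

	static void __init dump_memblock_regions(void)
	{
		struct memblock_region *r;
		phys_addr_t end;

		/* Walk memblock.memory without naming the internal array. */
		for_each_mem_region(r) {
			end = r->base + r->size;
			pr_info("memory:   %pa..%pa\n", &r->base, &end);
		}

		/* The same pattern covers memblock.reserved. */
		for_each_reserved_mem_region(r) {
			end = r->base + r->size;
			pr_info("reserved: %pa..%pa\n", &r->base, &end);
		}
	}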
@@ -1667,7 +1667,7 @@ static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
 	 * the memory memblock regions, if the @limit exceeds the total size
 	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
 	 */
-	for_each_memblock(memory, r) {
+	for_each_mem_region(r) {
 		if (limit <= r->size) {
 			max_addr = r->base + limit;
 			break;
@@ -1837,7 +1837,7 @@ void __init_memblock memblock_trim_memory(phys_addr_t align)
 	phys_addr_t start, end, orig_start, orig_end;
 	struct memblock_region *r;
 
-	for_each_memblock(memory, r) {
+	for_each_mem_region(r) {
 		orig_start = r->base;
 		orig_end = r->base + r->size;
 		start = round_up(orig_start, align);
@@ -5961,7 +5961,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 
 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
-			for_each_memblock(memory, r) {
+			for_each_mem_region(r) {
 				if (*pfn < memblock_region_memory_end_pfn(r))
 					break;
 			}
@@ -6546,7 +6546,7 @@ static unsigned long __init zone_absent_pages_in_node(int nid,
 	unsigned long start_pfn, end_pfn;
 	struct memblock_region *r;
 
-	for_each_memblock(memory, r) {
+	for_each_mem_region(r) {
 		start_pfn = clamp(memblock_region_memory_base_pfn(r),
 				  zone_start_pfn, zone_end_pfn);
 		end_pfn = clamp(memblock_region_memory_end_pfn(r),
@@ -7140,7 +7140,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	 * options.
 	 */
 	if (movable_node_is_enabled()) {
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			if (!memblock_is_hotpluggable(r))
 				continue;
 
@@ -7161,7 +7161,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	if (mirrored_kernelcore) {
 		bool mem_below_4gb_not_mirrored = false;
 
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			if (memblock_is_mirror(r))
 				continue;
 