mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-27 14:43:58 +08:00
ae4f976968
The various definitions of __pfn_to_phys() have been consolidated to
use a generic macro in include/asm-generic/memory_model.h. This hit
mainline in the form of 012dcef3f0
"mm: move __phys_to_pfn and
__pfn_to_phys to asm/generic/memory_model.h". When the generic macro
was implemented the type cast to phys_addr_t was dropped which caused
boot regressions on ARM platforms with more than 4GB of memory and
LPAE enabled.
It was suggested to use PFN_PHYS() defined in include/linux/pfn.h
as it provides the correct logic and avoids further duplication.
Reported-by: kernelci.org bot <bot@kernelci.org>
Suggested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Tyler Baker <tyler.baker@linaro.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
84 lines
2.1 KiB
C
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

/*
 * Generic pfn <-> struct page and pfn <-> physical address conversions.
 *
 * Provides __pfn_to_page()/__page_to_pfn() for each of the three memory
 * models (FLATMEM, DISCONTIGMEM, SPARSEMEM[_VMEMMAP]) selected by the
 * CONFIG_* options, plus __phys_to_pfn()/__pfn_to_phys().
 */

#ifndef __ASSEMBLY__

#if defined(CONFIG_FLATMEM)

/* First valid pfn covered by mem_map; architectures may override. */
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#elif defined(CONFIG_DISCONTIGMEM)

/* Map a pfn to the NUMA node owning it; architectures may override. */
#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

/* Offset of a pfn within its node's mem_map; architectures may override. */
#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */

/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

/* One flat mem_map array: index is simply pfn - ARCH_PFN_OFFSET. */
#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)

/* Per-node mem_map: resolve the node first, then index into it. */
#define __pfn_to_page(pfn)			\
({	unsigned long __pfn = (pfn);		\
	unsigned long __nid = arch_pfn_to_nid(__pfn);  \
	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define __page_to_pfn(pg)						\
({	const struct page *__pg = (pg);					\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
})

/*
 * Because section_mem_map is pre-biased by -start_pfn (see note above),
 * adding the raw pfn yields the correct struct page pointer.
 */
#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */

/*
 * Convert a physical address to a Page Frame Number and back.
 *
 * __pfn_to_phys() deliberately uses PFN_PHYS() from include/linux/pfn.h:
 * it casts through phys_addr_t before shifting, which is required on
 * 32-bit platforms with >4GB of physical memory (e.g. ARM LPAE) where a
 * plain unsigned long shift would truncate the address.
 */
#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
#define	__pfn_to_phys(pfn)	PFN_PHYS(pfn)

#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page

#endif /* __ASSEMBLY__ */

#endif