408fde81c1
This patch effectively eliminates direct use of pgdat->node_mem_map outside of the DISCONTIG code. On a flat memory system these fields aren't currently used, nor are they on a sparsemem system.

There was also a node_mem_map(nid) macro on many architectures. Its use, along with the use of ->node_mem_map itself, was not consistent. It has been removed in favor of two new, more explicit, arch-independent macros:

	pgdat_page_nr(pgdat, pagenr)
	nid_page_nr(nid, pagenr)

I called them "pgdat" and "nid" because we overload the term "node" to mean "NUMA node", "DISCONTIG node" or "pg_data_t" in very confusing ways. I believe the newer names are much clearer.

These macros can be overridden in the sparsemem case with a theoretically slower operation using node_start_pfn and pfn_to_page() instead. We could make this the only behavior if people want, but I don't want to change too much at once. One thing at a time.

This patch removes more code than it adds.

Compile tested on alpha, alpha discontig, arm, arm-discontig, i386, i386 generic, NUMAQ, Summit, ppc64, ppc64 discontig, and x86_64. Full list here: http://sr71.net/patches/2.6.12/2.6.12-rc1-mhp2/configs/

Boot tested on NUMAQ, x86 SMP and ppc64 power4/5 LPARs.

Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Martin J. Bligh <mbligh@aracnet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
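For reference, a rough sketch of what the two new macros look like as described above (the exact definitions live in include/linux/mmzone.h and may differ in detail; the second block is the kind of node_start_pfn + pfn_to_page() override the sparsemem case could use, not necessarily the literal kernel code):

	/* Default: index straight into the node's mem_map. */
	#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
	#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

	/* Possible sparsemem override: go through pfn_to_page() instead. */
	#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
	#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

Either way, callers look pages up by (node, offset) instead of touching ->node_mem_map directly.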
131 lines
3.6 KiB
C
/*
 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
 * Adapted for the alpha wildfire architecture Jan 2001.
 */
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <linux/config.h>
#include <asm/smp.h>

struct bootmem_data_t; /* stupid forward decl. */

/*
 * Following are macros that are specific to this numa platform.
 */

extern pg_data_t node_data[];

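/*
 * alpha_mv is the platform's machine vector.  When a platform supplies
 * no NUMA hooks, these fall back to a single node: every physical
 * address maps to nid 0, which starts at 0 and spans all of memory.
 */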
#define alpha_pa_to_nid(pa)		\
        (alpha_mv.pa_to_nid		\
         ? alpha_mv.pa_to_nid(pa)	\
         : (0))
#define node_mem_start(nid)		\
        (alpha_mv.node_mem_start	\
         ? alpha_mv.node_mem_start(nid)	\
         : (0UL))
#define node_mem_size(nid)		\
        (alpha_mv.node_mem_size		\
         ? alpha_mv.node_mem_size(nid)	\
         : ((nid) ? (0UL) : (~0UL)))

#define pa_to_nid(pa)		alpha_pa_to_nid(pa)
#define NODE_DATA(nid)		(&node_data[(nid)])

#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)

#if 1
#define PLAT_NODE_DATA_LOCALNR(p, n)	\
        (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#else
static inline unsigned long
PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
{
        unsigned long temp;
        temp = p >> PAGE_SHIFT;
        return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
}
#endif

#ifdef CONFIG_DISCONTIGMEM

/*
 * Following are macros that each numa implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

#define local_mapnr(kvaddr) \
        ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))

/*
 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
 * and returns the kaddr corresponding to first physical page in the
 * node's mem_map.
 */
#define LOCAL_BASE_ADDR(kaddr)						\
        ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
                             << PAGE_SHIFT))

/* XXX: FIXME -- wli */
#define kern_addr_valid(kaddr)	(0)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)

#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> 32))
#define pte_pfn(pte)		(pte_val(pte) >> 32)

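/*
 * On Alpha the PTE keeps the page frame number in its upper 32 bits
 * (see pte_pfn() above), so mk_pte() builds a PTE by shifting the
 * zone-relative page index plus zone_start_pfn up by 32 and OR-ing in
 * the protection bits.
 */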
#define mk_pte(page, pgprot)						     \
({									     \
        pte_t pte;							     \
        unsigned long pfn;						     \
									     \
        pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
        pfn += page_zone(page)->zone_start_pfn << 32;			     \
        pte_val(pte) = pfn | pgprot_val(pgprot);			     \
									     \
        pte;								     \
})

#define pte_page(x)							\
({									\
        unsigned long kvirt;						\
        struct page * __xx;						\
									\
        kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
        __xx = virt_to_page(kvirt);					\
									\
        __xx;								\
})

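/*
 * With DISCONTIGMEM there is no single global mem_map, so pfn_to_page()
 * first turns the pfn into a kernel virtual address, uses that to find
 * the owning node, and then indexes that node's node_mem_map with the
 * node-local offset computed by local_mapnr().
 */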
#define pfn_to_page(pfn)						\
({									\
        unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT);	\
        (NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr)); \
})

#define page_to_pfn(page)					\
        ((page) - page_zone(page)->zone_mem_map +		\
        (page_zone(page)->zone_start_pfn))

#define page_to_pa(page)					\
        ((( (page) - page_zone(page)->zone_mem_map )		\
        + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)

#define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
#define pfn_valid(pfn)						\
        (((pfn) - node_start_pfn(pfn_to_nid(pfn))) <		\
         node_spanned_pages(pfn_to_nid(pfn)))

#define virt_addr_valid(kaddr)	pfn_valid((__pa(kaddr) >> PAGE_SHIFT))

#endif /* CONFIG_DISCONTIGMEM */

#endif /* _ASM_MMZONE_H_ */