mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-29 15:43:59 +08:00
c6830c2260
commit 21a3c96
uses node_start/end_pfn(nid) for detection start/end
of nodes. But, it's not defined in linux/mmzone.h but defined in
/arch/???/include/mmzone.h which is included only under
CONFIG_NEED_MULTIPLE_NODES=y.
Then, we see
mm/page_cgroup.c: In function 'page_cgroup_init':
mm/page_cgroup.c:308: error: implicit declaration of function 'node_start_pfn'
mm/page_cgroup.c:309: error: implicit declaration of function 'node_end_pfn'
So, fixing page_cgroup.c is an idea...
But node_start_pfn()/node_end_pfn() is a very generic macro and
should be implemented in the same manner for all archs.
(m32r has different implementation...)
This patch removes definitions of node_start/end_pfn() in each archs
and defines a unified one in linux/mmzone.h. It's not under
CONFIG_NEED_MULTIPLE_NODES, now.
A result of macro expansion is here (mm/page_cgroup.c)
for !NUMA
start_pfn = ((&contig_page_data)->node_start_pfn);
end_pfn = ({ pg_data_t *__pgdat = (&contig_page_data); __pgdat->node_start_pfn + __pgdat->node_spanned_pages;});
for NUMA (x86-64)
start_pfn = ((node_data[nid])->node_start_pfn);
end_pfn = ({ pg_data_t *__pgdat = (node_data[nid]); __pgdat->node_start_pfn + __pgdat->node_spanned_pages;});
Changelog:
- fixed to avoid using "nid" twice in node_end_pfn() macro.
Reported-and-acked-by: Randy Dunlap <randy.dunlap@oracle.com>
Reported-and-tested-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
54 lines
1.3 KiB
C
54 lines
1.3 KiB
C
/*
|
|
* Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
|
|
*
|
|
*/
|
|
|
|
#ifndef _ASM_MMZONE_H_
|
|
#define _ASM_MMZONE_H_
|
|
|
|
#include <asm/smp.h>
|
|
|
|
#ifdef CONFIG_DISCONTIGMEM

/*
 * Per-node memory descriptors; presumably populated by the arch's NUMA
 * setup code elsewhere -- not visible in this header.
 */
extern struct pglist_data *node_data[];
/* Look up the pglist_data for node 'nid'. */
#define NODE_DATA(nid) (node_data[nid])

/* Offset of 'pfn' within its node, i.e. pfn relative to the node's first pfn. */
#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)

/*
 * Shift the pmd value down by PAGE_SHIFT to get a pfn, then convert to a
 * struct page.  NOTE(review): assumes pmd_val() yields a physical
 * address -- confirm against this arch's pgtable definitions.
 */
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

/*
 * pfn_valid should be made as fast as possible, and the current definition
 * is valid for machines that are NUMA, but still contiguous, which is what
 * is currently supported. A more generalised, but slower definition would
 * be something like this - mbligh:
 * ( pfn_to_pgdat(pfn) && ((pfn) < node_end_pfn(pfn_to_nid(pfn))) )
 */
#if 1 /* M32R_FIXME */
/* Placeholder: every pfn is reported valid until a real check is wired up. */
#define pfn_valid(pfn) (1)
#else
#define pfn_valid(pfn) ((pfn) < num_physpages)
#endif

/*
 * generic node memory support, the following assumptions apply:
 */
|
|
|
|
static __inline__ int pfn_to_nid(unsigned long pfn)
|
|
{
|
|
int node;
|
|
|
|
for (node = 0 ; node < MAX_NUMNODES ; node++)
|
|
if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
|
|
break;
|
|
|
|
return node;
|
|
}
|
|
|
|
/* Return the pglist_data for the node that owns 'pfn'. */
static __inline__ struct pglist_data *pfn_to_pgdat(unsigned long pfn)
{
	int nid = pfn_to_nid(pfn);

	return NODE_DATA(nid);
}
|
|
|
|
#endif /* CONFIG_DISCONTIGMEM */
|
|
#endif /* _ASM_MMZONE_H_ */
|