mm/sparse: move buffer init/fini to the common place
Now that both variants of sparse memory use the same buffers to populate the
memory map, we can move sparse_buffer_init()/sparse_buffer_fini() to the
common place.

Link: http://lkml.kernel.org/r/20180712203730.8703-4-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Tested-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Tested-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Pasha Tatashin <Pavel.Tatashin@microsoft.com>
Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit afda57bc13
parent e131c06b14
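The pattern being consolidated by the diff below is a simple bump allocator: sparse_buffer_init() reserves one large per-node buffer, sparse_buffer_alloc() carves section-sized chunks out of it, and sparse_buffer_fini() releases whatever was not used. What follows is a minimal userspace sketch of that idea, not the kernel implementation: the function and variable names mirror the ones in the diff, but the nid argument and the memblock calls are dropped in favor of plain malloc()/free(), and sparsemap_base is a demo-only helper so the buffer can be freed.

/*
 * Minimal userspace sketch of the sparse_buffer_init()/alloc()/fini()
 * pattern this patch moves into the common caller.  Illustration only:
 * the real kernel code allocates the buffer from memblock on a given
 * node and returns only the unused tail in sparse_buffer_fini().
 */
#include <stdio.h>
#include <stdlib.h>

static char *sparsemap_base;     /* demo only: kept so the demo can free() */
static char *sparsemap_buf;      /* next free byte in the scratch buffer */
static char *sparsemap_buf_end;  /* one past the end of the buffer */

/* Reserve one large contiguous buffer up front. */
static void sparse_buffer_init(unsigned long size)
{
	sparsemap_base = sparsemap_buf = malloc(size);
	sparsemap_buf_end = sparsemap_buf ? sparsemap_buf + size : NULL;
}

/* Carve the next chunk out of the buffer; NULL once it is exhausted. */
static void *sparse_buffer_alloc(unsigned long size)
{
	void *ptr;

	if (!sparsemap_buf || sparsemap_buf + size > sparsemap_buf_end)
		return NULL;	/* callers fall back to an ordinary allocation */

	ptr = sparsemap_buf;
	sparsemap_buf += size;
	return ptr;
}

/* In the kernel, whatever was not handed out goes back to memblock here. */
static void sparse_buffer_fini(void)
{
	free(sparsemap_base);	/* demo shortcut: drop the whole buffer instead */
	sparsemap_base = sparsemap_buf = sparsemap_buf_end = NULL;
}

int main(void)
{
	int i;

	/* One init/fini pair brackets all per-section allocations, as in the patch. */
	sparse_buffer_init(8 * 4096);
	for (i = 0; i < 10; i++)	/* ask for more than fits to show the fallback */
		printf("section %d -> %p\n", i, sparse_buffer_alloc(4096));
	sparse_buffer_fini();
	return 0;
}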
include/linux/mm.h | 3 ---
@@ -2671,9 +2671,6 @@ void sparse_mem_maps_populate_node(struct page **map_map,
 				  unsigned long map_count,
 				  int nodeid);
 
-unsigned long __init section_map_size(void);
-void sparse_buffer_init(unsigned long size, int nid);
-void sparse_buffer_fini(void);
 void *sparse_buffer_alloc(unsigned long size);
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap);
mm/sparse-vmemmap.c | 2 --
@@ -270,7 +270,6 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	unsigned long pnum;
 	int nr_consumed_maps = 0;
 
-	sparse_buffer_init(section_map_size() * map_count, nodeid);
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
@@ -282,5 +281,4 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
 	}
-	sparse_buffer_fini();
 }
mm/sparse.c | 14 +++++++-------
@@ -401,14 +401,14 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
 }
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-unsigned long __init section_map_size(void)
+static unsigned long __init section_map_size(void)
 
 {
 	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
 }
 
 #else
-unsigned long __init section_map_size(void)
+static unsigned long __init section_map_size(void)
 {
 	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 }
@@ -433,10 +433,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 				  unsigned long map_count, int nodeid)
 {
 	unsigned long pnum;
-	unsigned long size = section_map_size();
 	int nr_consumed_maps = 0;
 
-	sparse_buffer_init(size * map_count, nodeid);
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
@@ -447,14 +445,13 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
 	}
-	sparse_buffer_fini();
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
 static void *sparsemap_buf __meminitdata;
 static void *sparsemap_buf_end __meminitdata;
 
-void __init sparse_buffer_init(unsigned long size, int nid)
+static void __init sparse_buffer_init(unsigned long size, int nid)
 {
 	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
 	sparsemap_buf =
@@ -464,7 +461,7 @@ void __init sparse_buffer_init(unsigned long size, int nid)
 	sparsemap_buf_end = sparsemap_buf + size;
 }
 
-void __init sparse_buffer_fini(void)
+static void __init sparse_buffer_fini(void)
 {
 	unsigned long size = sparsemap_buf_end - sparsemap_buf;
 
@@ -494,8 +491,11 @@ static void __init sparse_early_mem_maps_alloc_node(void *data,
 				 unsigned long map_count, int nodeid)
 {
 	struct page **map_map = (struct page **)data;
+
+	sparse_buffer_init(section_map_size() * map_count, nodeid);
 	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
 					 map_count, nodeid);
+	sparse_buffer_fini();
 }
 #else
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)