linux/mm/memory_hotplug.c
Dave Hansen 3947be1969 [PATCH] memory hotplug: sysfs and add/remove functions
This adds generic memory add/remove and supporting functions for memory
hotplug into a new file as well as a memory hotplug kernel config option.

Individual architecture patches will follow.

For now, disable memory hotplug when swsusp is enabled.  There's a lot of
churn there right now.  We'll fix it up properly once it calms down.

Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-29 21:40:44 -07:00
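
The per-architecture hooks are not part of this patch. As a rough sketch only (the arch_add_memory() entry point, the node, and the zone chosen here are assumptions for illustration, not taken from this commit), an architecture that supports hotplug would pick a target zone and hand the new physical range to __add_pages():

	/* illustrative sketch, not part of this commit */
	int arch_add_memory(u64 start, u64 size)
	{
		struct pglist_data *pgdat = NODE_DATA(0);	/* assume node 0 */
		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;

		/* the arch decides the zone; __add_pages() does the generic work */
		return __add_pages(zone, start_pfn, nr_pages);
	}

The zone choice is deliberately left to the architecture, since only the arch code knows whether the new range can live in lowmem or must go somewhere like ZONE_HIGHMEM.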


/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
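
/*
 * Allocate and zero the memmap (array of struct page) backing one
 * memory section.  Try the page allocator first so the memmap sits in
 * the kernel's linear mapping; fall back to vmalloc space if the
 * high-order allocation fails.
 */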
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);
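
/*
 * Wire a freshly added section's pages into @zone: initialize their
 * struct pages and record the pfn range in the zone lookup table.
 */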
static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
}

extern int sparse_add_one_section(struct zone *, unsigned long,
				  struct page *mem_map);
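
/*
 * Add one section starting at @phys_start_pfn: allocate its memmap,
 * insert it into the sparsemem tables under the pgdat resize lock,
 * hook it into the zone, and register it with the memory sysfs layer.
 */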
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * This can potentially allocate memory, and does its own
	 * internal locking.
	 */
	sparse_index_init(pfn_to_section_nr(phys_start_pfn), pgdat->node_id);

	pgdat_resize_lock(pgdat, &flags);
	memmap = __kmalloc_section_memmap(nr_pages);
	ret = sparse_add_one_section(zone, phys_start_pfn, memmap);
	pgdat_resize_unlock(pgdat, &flags);

	if (ret <= 0) {
		/* the mem_map didn't get used */
		if (memmap >= (struct page *)VMALLOC_START &&
		    memmap < (struct page *)VMALLOC_END)
			vfree(memmap);
		else
			free_pages((unsigned long)memmap,
				   get_order(sizeof(struct page) * nr_pages));
	}

	if (ret < 0)
		return ret;

	__add_zone(zone, phys_start_pfn);

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		if (err)
			break;
	}

	return err;
}
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	if (end_pfn > old_zone_end_pfn)
		zone->spanned_pages = end_pfn - zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	if (end_pfn > old_pgdat_end_pfn)
		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
}
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);
		online_page(page);
		onlined_pages++;
	}
	zone->present_pages += onlined_pages;

	return 0;
}