linux-next/drivers/base/memory.c

commit c6f03e2903 ("mm, memory_hotplug: remove zone restrictions")
Author: Michal Hocko <mhocko@suse.com>

Historically we have enforced that any kernel zone (e.g. ZONE_NORMAL) has
to precede the Movable zone in the physical memory range.  The purpose
of the movable zone is, however, not bound to any physical memory
restriction.  It merely defines a class of migratable and reclaimable
memory.

There are users (e.g.  CMA) who might want to reserve specific physical
memory ranges for their own purpose.  Moreover, our pfn walkers have to
be prepared for zones overlapping in the physical range already because
we do support interleaving NUMA nodes and therefore zones can interleave
as well.  This means we can allow each memory block to be associated
with a different zone.

Loosen the current onlining semantics and allow an explicit onlining type
on any memblock.  That means that online_{kernel,movable} will be allowed
regardless of the physical address of the memblock, as long as it is
offline of course.  This might result in the Movable zone overlapping
with other kernel zones.  Default onlining then becomes a bit tricky but
still sensible: echo online > memoryXY/state will online the given
block to

	1) the default zone if the given range is outside of any zone
	2) the enclosing zone if such a zone doesn't interleave with
	   any other zone
	3) the default zone if more zones interleave for this range

where the default zone is the Movable zone only if movable_node is
enabled; otherwise it is a kernel zone.
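
In code, that decision boils down to something like the following sketch
(modeled on the new default_zone_for_pfn described at the end of this
changelog; the zone_intersects() helper and the movable_node_enabled flag
are assumed from the rest of the hotplug code, so treat this as an
illustration rather than the verbatim patch):

	static struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
			unsigned long nr_pages)
	{
		struct zone *kernel_zone = default_kernel_zone_for_pfn(nid,
						start_pfn, nr_pages);
		struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
		bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
		bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

		/* case 2: exactly one zone covers the range - reuse it */
		if (in_kernel ^ in_movable)
			return in_kernel ? kernel_zone : movable_zone;

		/* cases 1 and 3: no zone, or interleaving zones - pick the default */
		return movable_node_enabled ? movable_zone : kernel_zone;
	}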

Here is an example of the semantics (movable_node is not present, but it
works in an analogous way).  We start with the following memblocks, all
of them offline:

  memory34/valid_zones:Normal Movable
  memory35/valid_zones:Normal Movable
  memory36/valid_zones:Normal Movable
  memory37/valid_zones:Normal Movable
  memory38/valid_zones:Normal Movable
  memory39/valid_zones:Normal Movable
  memory40/valid_zones:Normal Movable
  memory41/valid_zones:Normal Movable

Now, we online block 34 in the default mode and block 37 as movable:

  root@test1:/sys/devices/system/node/node1# echo online > memory34/state
  root@test1:/sys/devices/system/node/node1# echo online_movable > memory37/state
  memory34/valid_zones:Normal
  memory35/valid_zones:Normal Movable
  memory36/valid_zones:Normal Movable
  memory37/valid_zones:Movable
  memory38/valid_zones:Normal Movable
  memory39/valid_zones:Normal Movable
  memory40/valid_zones:Normal Movable
  memory41/valid_zones:Normal Movable

As we can see, all other blocks can still be onlined into both the Normal
and Movable zones, and Normal is the default because the Movable zone
spans only block 37 now.

  root@test1:/sys/devices/system/node/node1# echo online_movable > memory41/state
  memory34/valid_zones:Normal
  memory35/valid_zones:Normal Movable
  memory36/valid_zones:Normal Movable
  memory37/valid_zones:Movable
  memory38/valid_zones:Movable Normal
  memory39/valid_zones:Movable Normal
  memory40/valid_zones:Movable Normal
  memory41/valid_zones:Movable

Now the default zone for blocks 37-41 has changed because the Movable
zone spans that range.

  root@test1:/sys/devices/system/node/node1# echo online_kernel > memory39/state
  memory34/valid_zones:Normal
  memory35/valid_zones:Normal Movable
  memory36/valid_zones:Normal Movable
  memory37/valid_zones:Movable
  memory38/valid_zones:Normal Movable
  memory39/valid_zones:Normal
  memory40/valid_zones:Movable Normal
  memory41/valid_zones:Movable

Note that block 39 now belongs to the Normal zone, so block 38 falls
into Normal by default as well.

For completeness:

  root@test1:/sys/devices/system/node/node1# for i in memory[34]?
  do
	echo online > $i/state 2>/dev/null
  done

  memory34/valid_zones:Normal
  memory35/valid_zones:Normal
  memory36/valid_zones:Normal
  memory37/valid_zones:Movable
  memory38/valid_zones:Normal
  memory39/valid_zones:Normal
  memory40/valid_zones:Movable
  memory41/valid_zones:Movable

Implementation-wise the change is quite straightforward.  We can get rid
of allow_online_pfn_range altogether, because online_pages only accepts
ranges that are offline anyway.  The original default_zone_for_pfn
becomes default_kernel_zone_for_pfn, and a new default_zone_for_pfn
implements the semantics described above.  zone_for_pfn_range is slightly
reorganized to implement the kernel and movable online types explicitly,
and MMOP_ONLINE_KEEP becomes a catch-all default behavior.
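
A sketch of the reorganized zone_for_pfn_range (same caveat as above:
this shows the shape of the logic, not the verbatim diff):

	struct zone *zone_for_pfn_range(int online_type, int nid,
			unsigned long start_pfn, unsigned long nr_pages)
	{
		if (online_type == MMOP_ONLINE_KERNEL)
			return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

		if (online_type == MMOP_ONLINE_MOVABLE)
			return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

		/* MMOP_ONLINE_KEEP: catch-all, apply the default semantics above */
		return default_zone_for_pfn(nid, start_pfn, nr_pages);
	}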

Link: http://lkml.kernel.org/r/20170714121233.16861-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Kani Toshimitsu <toshi.kani@hpe.com>
Cc: <slaoub@gmail.com>
Cc: Daniel Kiper <daniel.kiper@oracle.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: <linux-api@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2017-09-06 17:27:25 -07:00

/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

/* Convert a section number to the id of the memory block covering it. */
static inline int base_memory_block_id(int section_nr)
{
	return section_nr / sections_per_block;
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}

static unsigned long get_memory_block_size(void)
{
	unsigned long block_sz;

	block_sz = memory_block_size_bytes();

	/* Validate blk_sz is a power of 2 and not less than section size */
	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
		WARN_ON(1);
		block_sz = MIN_MEMORY_BLOCK_SIZE;
	}

	return block_sz;
}

/*
 * use this as the physical section index that this memsection
 * uses.
 */
static ssize_t show_mem_start_phys_index(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}

/*
 * Show whether the section of memory is likely to be hot-removable
 */
static ssize_t show_mem_removable(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	unsigned long i, pfn;
	int ret = 1;
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state != MEM_ONLINE)
		goto out;

	for (i = 0; i < sections_per_block; i++) {
		if (!present_section_nr(mem->start_section_nr + i))
			continue;
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

out:
	return sprintf(buf, "%d\n", ret);
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t show_mem_state(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
				mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * The probe routines leave the pages reserved, just as the bootmem code does.
 * Make sure they're still that way.
 */
static bool pages_correctly_reserved(unsigned long start_pfn)
{
	int i, j;
	struct page *page;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
	 * and assume memmap is contiguous within each section
	 */
	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;
		page = pfn_to_page(pfn);

		for (j = 0; j < PAGES_PER_SECTION; j++) {
			if (PageReserved(page + j))
				continue;

			printk(KERN_WARNING "section number %ld page number %d "
				"not reserved, was it already online?\n",
				pfn_to_section_nr(pfn), j);

			return false;
		}
	}

	return true;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 * Must already be protected by mem_hotplug_begin().
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(phys_index);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_reserved(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, phys_index, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				mem->online_type);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from store_mem_state(), online_type will be
	 * set >= 0.  Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE_KEEP;

	/* Already under protection of mem_hotplug_begin() */
	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t
store_mem_state(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	/*
	 * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
	 * the correct memory block to online before doing device_online(dev),
	 * which will take dev->mutex.  Take the lock early to prevent an
	 * inversion; the memory_subsys_online() callback is written assuming
	 * the lock is already held.
	 */
	mem_hotplug_begin();

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	mem_hotplug_done();
err:
	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}

/*
 * phys_device is a bad name for this.  What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or fru.
 * i.e. do these ranges belong to the same physical device,
 * s.t. if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t show_phys_device(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	return sprintf(buf, "%d\n", mem->phys_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
		unsigned long nr_pages, int online_type,
		struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}

static ssize_t show_valid_zones(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long valid_start_pfn, valid_end_pfn;
	struct zone *default_zone;
	int nid;

	/*
	 * A block that contains more than one zone can not be offlined.
	 * This can happen e.g. for ZONE_DMA and ZONE_DMA32.
	 */
	if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
				  &valid_start_pfn, &valid_end_pfn))
		return sprintf(buf, "none\n");

	start_pfn = valid_start_pfn;
	nr_pages = valid_end_pfn - start_pfn;

	/*
	 * Check the existing zone.  Make sure that we do that only on
	 * online nodes, otherwise page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
		goto out;
	}

	nid = pfn_to_nid(start_pfn);
	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn,
					  nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			default_zone);
out:
	strcat(buf, "\n");
	return strlen(buf);
}
static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
#endif

static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);

/*
 * Block size attribute stuff
 */
static ssize_t
print_block_size(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	return sprintf(buf, "%lx\n", get_memory_block_size());
}

static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);

/*
 * Memory auto online policy.
 */
static ssize_t
show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t
store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
		   store_auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace.  The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = add_memory(nid, phys_addr,
			 MIN_MEMORY_BLOCK_SIZE * sections_per_block);

	if (ret)
		goto out;

	ret = count;
out:
	return ret;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
#endif

/*
 * Note that phys_device is optional.  It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned object is held and the reference for the
 * hinted object is released.
 */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
					      struct memory_block *hint)
{
	int block_id = base_memory_block_id(__section_nr(section));
	struct device *hintdev = hint ? &hint->dev : NULL;
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
	if (hint)
		put_device(&hint->dev);
	if (!dev)
		return NULL;
	return to_memory_block(dev);
}

/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index.  If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	return find_memory_block_hinted(section, NULL);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	return device_register(&memory->dev);
}

static int init_memory_block(struct memory_block **memory,
			     struct mem_section *section, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int scn_nr;
	int ret = 0;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	scn_nr = __section_nr(section);
	mem->start_section_nr =
			base_memory_block_id(scn_nr) * sections_per_block;
	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}

static int add_memory_block(int base_section_nr)
{
	struct memory_block *mem;
	int i, ret, section_count = 0, section_nr;

	for (i = base_section_nr;
	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
	     i++) {
		if (!present_section_nr(i))
			continue;
		if (section_count == 0)
			section_nr = i;
		section_count++;
	}

	if (section_count == 0)
		return 0;
	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}

/*
 * need an interface for the VM to add new memory regions,
 * but without onlining it.
 */
int register_new_memory(int nid, struct mem_section *section)
{
	int ret = 0;
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	mem = find_memory_block(section);
	if (mem) {
		mem->section_count++;
		put_device(&mem->dev);
	} else {
		ret = init_memory_block(&mem, section, MEM_OFFLINE);
		if (ret)
			goto out;
		mem->section_count++;
	}

	if (mem->section_count == sections_per_block)
		ret = register_mem_sect_under_node(mem, nid);
out:
	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void
unregister_memory(struct memory_block *memory)
{
	BUG_ON(memory->dev.bus != &memory_subsys);

	/* drop the ref. we got in remove_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

static int remove_memory_section(unsigned long node_id,
				 struct mem_section *section, int phys_device)
{
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	/*
	 * Some users of the memory hotplug do not want/need memblock to
	 * track all sections.  Skip over those.
	 */
	mem = find_memory_block(section);
	if (!mem)
		goto out_unlock;

	unregister_mem_sect_under_nodes(mem, __section_nr(section));

	mem->section_count--;
	if (mem->section_count == 0)
		unregister_memory(mem);
	else
		put_device(&mem->dev);

out_unlock:
	mutex_unlock(&mem_sysfs_mutex);
	return 0;
}

int unregister_memory_section(struct mem_section *section)
{
	if (!present_section(section))
		return -EINVAL;

	return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;
	unsigned long block_sz;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		goto out;

	block_sz = get_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	mutex_lock(&mem_sysfs_mutex);
	for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
		/* Don't iterate over sections we know are !present: */
		if (i > __highest_present_section_nr)
			break;

		err = add_memory_block(i);
		if (!ret)
			ret = err;
	}
	mutex_unlock(&mem_sysfs_mutex);

out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}