dax/hmem: Move hmem device registration to dax_hmem.ko
In preparation for the CXL region driver to take over the responsibility of registering device-dax instances for CXL regions, move the registration of "hmem" devices to dax_hmem.ko.

Previously, the builtin component of this enabling (drivers/dax/hmem/device.o) would register platform devices for each address range and trigger the dax_hmem.ko module to load and attach device-dax instances to those devices. Now, the ranges are still collected from the HMAT and EFI memory map walking, but the device creation is deferred. A new "hmem_platform" device is created instead, which triggers dax_hmem.ko to load and register the platform devices.

Tested-by: Fan Ni <fan.ni@samsung.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/167602002771.1924368.5653558226424530127.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit 7dab174e2e
parent fe098574a9
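The mechanics are easiest to see from the consumer side: the builtin device.c code now only records each soft-reserved range in a resource tree and registers a single "hmem_platform" trigger device, while dax_hmem.ko's probe walks the recorded ranges and registers one "hmem" platform device per range through the walk_hmem_resources() callback contract introduced in the diff below. A minimal sketch of such a walker, with hypothetical names (my_dump_range, my_probe) used purely for illustration:

	#include <linux/dax.h>
	#include <linux/platform_device.h>

	/*
	 * walk_hmem_fn callback: invoked once per recorded soft-reserved
	 * range while hmem_resource_lock is held. Returning non-zero stops
	 * the walk and propagates the error to the caller.
	 */
	static int my_dump_range(struct device *host, int target_nid,
				 const struct resource *res)
	{
		dev_dbg(host, "target node %d: range %pr\n", target_nid, res);
		return 0;
	}

	static int my_probe(struct platform_device *pdev)
	{
		/* iterate every range collected by HMAT/EFI memory map walking */
		return walk_hmem_resources(&pdev->dev, my_dump_range);
	}

dax_hmem_platform_probe() in the hmem.c hunk below is the real consumer; it has exactly this shape, with hmem_register_device() as the callback.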
drivers/acpi/numa/hmat.c

@@ -718,7 +718,7 @@ static void hmat_register_target_devices(struct memory_target *target)
 	for (res = target->memregions.child; res; res = res->sibling) {
 		int target_nid = pxm_to_node(target->memory_pxm);
 
-		hmem_register_device(target_nid, res);
+		hmem_register_resource(target_nid, res);
 	}
 }
 
drivers/dax/Kconfig

@@ -46,7 +46,7 @@ config DEV_DAX_HMEM
 	  Say M if unsure.
 
 config DEV_DAX_HMEM_DEVICES
-	depends on DEV_DAX_HMEM && DAX=y
+	depends on DEV_DAX_HMEM && DAX
 	def_bool y
 
 config DEV_DAX_KMEM
drivers/dax/hmem/device.c

@@ -8,6 +8,8 @@
 static bool nohmem;
 module_param_named(disable, nohmem, bool, 0444);
 
+static bool platform_initialized;
+static DEFINE_MUTEX(hmem_resource_lock);
 static struct resource hmem_active = {
 	.name = "HMEM devices",
 	.start = 0,
@@ -15,71 +17,66 @@ static struct resource hmem_active = {
 	.flags = IORESOURCE_MEM,
 };
 
-void hmem_register_device(int target_nid, struct resource *res)
+int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
 {
+	struct resource *res;
+	int rc = 0;
+
+	mutex_lock(&hmem_resource_lock);
+	for (res = hmem_active.child; res; res = res->sibling) {
+		rc = fn(host, (int) res->desc, res);
+		if (rc)
+			break;
+	}
+	mutex_unlock(&hmem_resource_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(walk_hmem_resources);
+
+static void __hmem_register_resource(int target_nid, struct resource *res)
+{
 	struct platform_device *pdev;
-	struct memregion_info info;
-	int rc, id;
+	struct resource *new;
+	int rc;
 
-	if (nohmem)
-		return;
-
-	rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
-			IORES_DESC_SOFT_RESERVED);
-	if (rc != REGION_INTERSECTS)
-		return;
-
-	id = memregion_alloc(GFP_KERNEL);
-	if (id < 0) {
-		pr_err("memregion allocation failure for %pr\n", res);
+	new = __request_region(&hmem_active, res->start, resource_size(res), "",
+			       0);
+	if (!new) {
+		pr_debug("hmem range %pr already active\n", res);
 		return;
 	}
 
-	pdev = platform_device_alloc("hmem", id);
+	new->desc = target_nid;
+
+	if (platform_initialized)
+		return;
+
+	pdev = platform_device_alloc("hmem_platform", 0);
 	if (!pdev) {
-		pr_err("hmem device allocation failure for %pr\n", res);
-		goto out_pdev;
-	}
-
-	if (!__request_region(&hmem_active, res->start, resource_size(res),
-			      dev_name(&pdev->dev), 0)) {
-		dev_dbg(&pdev->dev, "hmem range %pr already active\n", res);
-		goto out_active;
-	}
-
-	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
-	info = (struct memregion_info) {
-		.target_node = target_nid,
-		.range = {
-			.start = res->start,
-			.end = res->end,
-		},
-	};
-	rc = platform_device_add_data(pdev, &info, sizeof(info));
-	if (rc < 0) {
-		pr_err("hmem memregion_info allocation failure for %pr\n", res);
-		goto out_resource;
+		pr_err_once("failed to register device-dax hmem_platform device\n");
+		return;
 	}
 
 	rc = platform_device_add(pdev);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "device add failed for %pr\n", res);
-		goto out_resource;
-	}
+	if (rc)
+		platform_device_put(pdev);
+	else
+		platform_initialized = true;
+}
 
-	return;
+void hmem_register_resource(int target_nid, struct resource *res)
+{
+	if (nohmem)
+		return;
 
-out_resource:
-	__release_region(&hmem_active, res->start, resource_size(res));
-out_active:
-	platform_device_put(pdev);
-out_pdev:
-	memregion_free(id);
+	mutex_lock(&hmem_resource_lock);
+	__hmem_register_resource(target_nid, res);
+	mutex_unlock(&hmem_resource_lock);
 }
 
 static __init int hmem_register_one(struct resource *res, void *data)
 {
-	hmem_register_device(phys_to_target_node(res->start), res);
+	hmem_register_resource(phys_to_target_node(res->start), res);
 
 	return 0;
 }
drivers/dax/hmem/hmem.c

@@ -3,6 +3,7 @@
+#include <linux/memregion.h>
 #include <linux/module.h>
 #include <linux/pfn_t.h>
 #include <linux/dax.h>
 #include "../bus.h"
 
 static bool region_idle;
@@ -43,8 +44,110 @@ static struct platform_driver dax_hmem_driver = {
 	},
 };
 
-module_platform_driver(dax_hmem_driver);
+static void release_memregion(void *data)
+{
+	memregion_free((long) data);
+}
+
+static void release_hmem(void *pdev)
+{
+	platform_device_unregister(pdev);
+}
+
+static int hmem_register_device(struct device *host, int target_nid,
+				const struct resource *res)
+{
+	struct platform_device *pdev;
+	struct memregion_info info;
+	long id;
+	int rc;
+
+	rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
+			       IORES_DESC_SOFT_RESERVED);
+	if (rc != REGION_INTERSECTS)
+		return 0;
+
+	id = memregion_alloc(GFP_KERNEL);
+	if (id < 0) {
+		dev_err(host, "memregion allocation failure for %pr\n", res);
+		return -ENOMEM;
+	}
+	rc = devm_add_action_or_reset(host, release_memregion, (void *) id);
+	if (rc)
+		return rc;
+
+	pdev = platform_device_alloc("hmem", id);
+	if (!pdev) {
+		dev_err(host, "device allocation failure for %pr\n", res);
+		return -ENOMEM;
+	}
+
+	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
+	info = (struct memregion_info) {
+		.target_node = target_nid,
+		.range = {
+			.start = res->start,
+			.end = res->end,
+		},
+	};
+	rc = platform_device_add_data(pdev, &info, sizeof(info));
+	if (rc < 0) {
+		dev_err(host, "memregion_info allocation failure for %pr\n",
+			res);
+		goto out_put;
+	}
+
+	rc = platform_device_add(pdev);
+	if (rc < 0) {
+		dev_err(host, "%s add failed for %pr\n", dev_name(&pdev->dev),
+			res);
+		goto out_put;
+	}
+
+	return devm_add_action_or_reset(host, release_hmem, pdev);
+
+out_put:
+	platform_device_put(pdev);
+	return rc;
+}
+
+static int dax_hmem_platform_probe(struct platform_device *pdev)
+{
+	return walk_hmem_resources(&pdev->dev, hmem_register_device);
+}
+
+static struct platform_driver dax_hmem_platform_driver = {
+	.probe = dax_hmem_platform_probe,
+	.driver = {
+		.name = "hmem_platform",
+	},
+};
+
+static __init int dax_hmem_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&dax_hmem_platform_driver);
+	if (rc)
+		return rc;
+
+	rc = platform_driver_register(&dax_hmem_driver);
+	if (rc)
+		platform_driver_unregister(&dax_hmem_platform_driver);
+
+	return rc;
+}
+
+static __exit void dax_hmem_exit(void)
+{
+	platform_driver_unregister(&dax_hmem_driver);
+	platform_driver_unregister(&dax_hmem_platform_driver);
+}
+
+module_init(dax_hmem_init);
+module_exit(dax_hmem_exit);
 
 MODULE_ALIAS("platform:hmem*");
+MODULE_ALIAS("platform:hmem_platform*");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Intel Corporation");
include/linux/dax.h

@@ -262,11 +262,14 @@ static inline bool dax_mapping(struct address_space *mapping)
 }
 
 #ifdef CONFIG_DEV_DAX_HMEM_DEVICES
-void hmem_register_device(int target_nid, struct resource *r);
+void hmem_register_resource(int target_nid, struct resource *r);
 #else
-static inline void hmem_register_device(int target_nid, struct resource *r)
+static inline void hmem_register_resource(int target_nid, struct resource *r)
 {
 }
 #endif
 
+typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
+			    const struct resource *res);
+int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
 #endif