cxl for v5.16
Merge tag 'cxl-for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl updates from Dan Williams:
 "More preparation and plumbing work in the CXL subsystem. From an end
  user perspective the highlight here is lighting up the CXL Persistent
  Memory related commands (label read / write) with the generic ioctl()
  front-end in LIBNVDIMM.

  Otherwise, the ability to instantiate new persistent and volatile
  memory regions is still on track for v5.17.

  Summary:

   - Fix support for platforms that do not enumerate every ACPI0016
     (CXL Host Bridge) in the CHBS (ACPI Host Bridge Structure).

   - Introduce a common pci_find_dvsec_capability() helper, clean up
     open coded implementations in various drivers.

   - Add 'cxl_test' for regression testing CXL subsystem ABIs.
     'cxl_test' is a module built from tools/testing/cxl/ that mocks up
     a CXL topology to augment the nascent support for emulation of CXL
     devices in QEMU.

   - Convert libnvdimm to use the uuid API.

   - Complete the definition of CXL namespace labels in libnvdimm.

   - Tunnel libnvdimm label operations from nd_ioctl() back to the CXL
     mailbox driver. Enable 'ndctl {read,write}-labels' for CXL.

   - Continue to sort and refactor functionality into distinct driver
     and core-infrastructure buckets. For example, mailbox handling is
     now a generic core capability consumed by the PCI and cxl_test
     drivers"

* tag 'cxl-for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (34 commits)
  ocxl: Use pci core's DVSEC functionality
  cxl/pci: Use pci core's DVSEC functionality
  PCI: Add pci_find_dvsec_capability to find designated VSEC
  cxl/pci: Split cxl_pci_setup_regs()
  cxl/pci: Add @base to cxl_register_map
  cxl/pci: Make more use of cxl_register_map
  cxl/pci: Remove pci request/release regions
  cxl/pci: Fix NULL vs ERR_PTR confusion
  cxl/pci: Remove dev_dbg for unknown register blocks
  cxl/pci: Convert register block identifiers to an enum
  cxl/acpi: Do not fail cxl_acpi_probe() based on a missing CHBS
  cxl/pci: Disambiguate cxl_pci further from cxl_mem
  Documentation/cxl: Add bus internal docs
  cxl/core: Split decoder setup into alloc + add
  tools/testing/cxl: Introduce a mock memory device + driver
  cxl/mbox: Move command definitions to common location
  cxl/bus: Populate the target list at decoder create
  tools/testing/cxl: Introduce a mocked-up CXL port hierarchy
  cxl/pmem: Add support for multiple nvdimm-bridge objects
  cxl/pmem: Translate NVDIMM label commands to CXL label commands
  ...
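The pci_find_dvsec_capability() helper called out in the summary takes a PCI device, a vendor ID, and a DVSEC ID, and returns the config-space position of the matching Designated Vendor-Specific Extended Capability, or 0 when none is found. A minimal consumer sketch (the vendor/DVSEC IDs below are illustrative placeholders, not values from this series; the ocxl hunk further down shows a real conversion):

#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x1234	/* placeholder: any DVSEC vendor */
#define EXAMPLE_DVSEC_ID	0x00	/* placeholder: that vendor's DVSEC ID */

static int example_probe(struct pci_dev *pdev)
{
	u16 pos;

	/* Walk PCI_EXT_CAP_ID_DVSEC instances for a vendor/ID match */
	pos = pci_find_dvsec_capability(pdev, EXAMPLE_VENDOR_ID,
					EXAMPLE_DVSEC_ID);
	if (!pos)
		return -ENODEV;

	/* DVSEC registers can now be read at offsets relative to @pos */
	return 0;
}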
Documentation/driver-api/cxl/memory-devices.rst

@@ -39,12 +39,18 @@ CXL Core
 .. kernel-doc:: drivers/cxl/core/bus.c
    :doc: cxl core
 
+.. kernel-doc:: drivers/cxl/core/bus.c
+   :identifiers:
+
 .. kernel-doc:: drivers/cxl/core/pmem.c
    :doc: cxl pmem
 
 .. kernel-doc:: drivers/cxl/core/regs.c
    :doc: cxl registers
 
+.. kernel-doc:: drivers/cxl/core/mbox.c
+   :doc: cxl mbox
+
 External Interfaces
 ===================
drivers/misc/ocxl/config.c

@@ -107,7 +107,8 @@ static int get_max_afu_index(struct pci_dev *dev, int *afu_idx)
 	int pos;
 	u32 val;
 
-	pos = find_dvsec_from_pos(dev, OCXL_DVSEC_FUNC_ID, 0);
+	pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM,
+					OCXL_DVSEC_FUNC_ID);
 	if (!pos)
 		return -ESRCH;
 
drivers/cxl/acpi.c

@@ -52,6 +52,12 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
 		return -EINVAL;
 	}
 
+	if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
+		dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
+			CFMWS_INTERLEAVE_WAYS(cfmws));
+		return -EINVAL;
+	}
+
 	expected_len = struct_size((cfmws), interleave_targets,
 				   CFMWS_INTERLEAVE_WAYS(cfmws));
 
@@ -71,11 +77,11 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
 static void cxl_add_cfmws_decoders(struct device *dev,
 				   struct cxl_port *root_port)
 {
+	int target_map[CXL_DECODER_MAX_INTERLEAVE];
 	struct acpi_cedt_cfmws *cfmws;
 	struct cxl_decoder *cxld;
 	acpi_size len, cur = 0;
 	void *cedt_subtable;
-	unsigned long flags;
 	int rc;
 
 	len = acpi_cedt->length - sizeof(*acpi_cedt);
@@ -83,6 +89,7 @@ static void cxl_add_cfmws_decoders(struct device *dev,
 
 	while (cur < len) {
 		struct acpi_cedt_header *c = cedt_subtable + cur;
+		int i;
 
 		if (c->type != ACPI_CEDT_TYPE_CFMWS) {
 			cur += c->length;
@@ -108,24 +115,39 @@ static void cxl_add_cfmws_decoders(struct device *dev,
 			continue;
 		}
 
-		flags = cfmws_to_decoder_flags(cfmws->restrictions);
-		cxld = devm_cxl_add_decoder(dev, root_port,
-					    CFMWS_INTERLEAVE_WAYS(cfmws),
-					    cfmws->base_hpa, cfmws->window_size,
-					    CFMWS_INTERLEAVE_WAYS(cfmws),
-					    CFMWS_INTERLEAVE_GRANULARITY(cfmws),
-					    CXL_DECODER_EXPANDER,
-					    flags);
+		for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+			target_map[i] = cfmws->interleave_targets[i];
 
-		if (IS_ERR(cxld)) {
+		cxld = cxl_decoder_alloc(root_port,
+					 CFMWS_INTERLEAVE_WAYS(cfmws));
+		if (IS_ERR(cxld))
+			goto next;
+
+		cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
+		cxld->target_type = CXL_DECODER_EXPANDER;
+		cxld->range = (struct range) {
+			.start = cfmws->base_hpa,
+			.end = cfmws->base_hpa + cfmws->window_size - 1,
+		};
+		cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
+		cxld->interleave_granularity =
+			CFMWS_INTERLEAVE_GRANULARITY(cfmws);
+
+		rc = cxl_decoder_add(cxld, target_map);
+		if (rc)
+			put_device(&cxld->dev);
+		else
+			rc = cxl_decoder_autoremove(dev, cxld);
+		if (rc) {
 			dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
 				cfmws->base_hpa, cfmws->base_hpa +
 				cfmws->window_size - 1);
-		} else {
-			dev_dbg(dev, "add: %s range %#llx-%#llx\n",
-				dev_name(&cxld->dev), cfmws->base_hpa,
-				cfmws->base_hpa + cfmws->window_size - 1);
+			goto next;
 		}
+		dev_dbg(dev, "add: %s range %#llx-%#llx\n",
+			dev_name(&cxld->dev), cfmws->base_hpa,
+			cfmws->base_hpa + cfmws->window_size - 1);
+next:
 		cur += c->length;
 	}
 }
@@ -182,15 +204,7 @@ static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
 	return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
 }
 
-struct cxl_walk_context {
-	struct device *dev;
-	struct pci_bus *root;
-	struct cxl_port *port;
-	int error;
-	int count;
-};
-
-static int match_add_root_ports(struct pci_dev *pdev, void *data)
+__mock int match_add_root_ports(struct pci_dev *pdev, void *data)
 {
 	struct cxl_walk_context *ctx = data;
 	struct pci_bus *root_bus = ctx->root;
@@ -239,7 +253,8 @@ static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device
 	return NULL;
 }
 
-static struct acpi_device *to_cxl_host_bridge(struct device *dev)
+__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
+					      struct device *dev)
 {
 	struct acpi_device *adev = to_acpi_device(dev);
 
@@ -257,11 +272,12 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
  */
 static int add_host_bridge_uport(struct device *match, void *arg)
 {
-	struct acpi_device *bridge = to_cxl_host_bridge(match);
 	struct cxl_port *root_port = arg;
 	struct device *host = root_port->dev.parent;
+	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
 	struct acpi_pci_root *pci_root;
 	struct cxl_walk_context ctx;
+	int single_port_map[1], rc;
 	struct cxl_decoder *cxld;
 	struct cxl_dport *dport;
 	struct cxl_port *port;
@@ -272,7 +288,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 	dport = find_dport_by_dev(root_port, match);
 	if (!dport) {
 		dev_dbg(host, "host bridge expected and not found\n");
-		return -ENODEV;
+		return 0;
 	}
 
 	port = devm_cxl_add_port(host, match, dport->component_reg_phys,
@@ -297,22 +313,46 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 		return -ENODEV;
 	if (ctx.error)
 		return ctx.error;
+	if (ctx.count > 1)
+		return 0;
 
 	/* TODO: Scan CHBCR for HDM Decoder resources */
 
 	/*
-	 * In the single-port host-bridge case there are no HDM decoders
-	 * in the CHBCR and a 1:1 passthrough decode is implied.
+	 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability
+	 * Structure) single ported host-bridges need not publish a decoder
+	 * capability when a passthrough decode can be assumed, i.e. all
+	 * transactions that the uport sees are claimed and passed to the single
+	 * dport. Disable the range until the first CXL region is enumerated /
+	 * activated.
 	 */
-	if (ctx.count == 1) {
-		cxld = devm_cxl_add_passthrough_decoder(host, port);
-		if (IS_ERR(cxld))
-			return PTR_ERR(cxld);
+	cxld = cxl_decoder_alloc(port, 1);
+	if (IS_ERR(cxld))
+		return PTR_ERR(cxld);
+
+	cxld->interleave_ways = 1;
+	cxld->interleave_granularity = PAGE_SIZE;
+	cxld->target_type = CXL_DECODER_EXPANDER;
+	cxld->range = (struct range) {
+		.start = 0,
+		.end = -1,
+	};
 
-		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
-	}
+	device_lock(&port->dev);
+	dport = list_first_entry(&port->dports, typeof(*dport), list);
+	device_unlock(&port->dev);
 
-	return 0;
+	single_port_map[0] = dport->port_id;
+
+	rc = cxl_decoder_add(cxld, single_port_map);
+	if (rc)
+		put_device(&cxld->dev);
+	else
+		rc = cxl_decoder_autoremove(host, cxld);
+
+	if (rc == 0)
+		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
+	return rc;
 }
@@ -323,7 +363,7 @@ static int add_host_bridge_dport(struct device *match, void *arg)
 	struct acpi_cedt_chbs *chbs;
 	struct cxl_port *root_port = arg;
 	struct device *host = root_port->dev.parent;
-	struct acpi_device *bridge = to_cxl_host_bridge(match);
+	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
 
 	if (!bridge)
 		return 0;
@@ -337,9 +377,11 @@ static int add_host_bridge_dport(struct device *match, void *arg)
 	}
 
 	chbs = cxl_acpi_match_chbs(host, uid);
-	if (IS_ERR(chbs))
-		dev_dbg(host, "No CHBS found for Host Bridge: %s\n",
-			dev_name(match));
+	if (IS_ERR(chbs)) {
+		dev_warn(host, "No CHBS found for Host Bridge: %s\n",
+			 dev_name(match));
+		return 0;
+	}
 
 	rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
 	if (rc) {
@@ -375,6 +417,17 @@ static int add_root_nvdimm_bridge(struct device *match, void *data)
 	return 1;
 }
 
+static u32 cedt_instance(struct platform_device *pdev)
+{
+	const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev);
+
+	if (native_acpi0017 && *native_acpi0017)
+		return 0;
+
+	/* for cxl_test request a non-canonical instance */
+	return U32_MAX;
+}
+
 static int cxl_acpi_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -388,7 +441,7 @@ static int cxl_acpi_probe(struct platform_device *pdev)
 		return PTR_ERR(root_port);
 	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
 
-	status = acpi_get_table(ACPI_SIG_CEDT, 0, &acpi_cedt);
+	status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt);
 	if (ACPI_FAILURE(status))
 		return -ENXIO;
 
@@ -419,9 +472,11 @@ out:
 	return 0;
 }
 
+static bool native_acpi0017 = true;
+
 static const struct acpi_device_id cxl_acpi_ids[] = {
-	{ "ACPI0017", 0 },
-	{ "", 0 },
+	{ "ACPI0017", (unsigned long) &native_acpi0017 },
+	{ },
 };
 MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
 
drivers/cxl/core/Makefile

@@ -6,3 +6,4 @@ cxl_core-y := bus.o
 cxl_core-y += pmem.o
 cxl_core-y += regs.o
 cxl_core-y += memdev.o
+cxl_core-y += mbox.o
drivers/cxl/core/bus.c

@@ -453,50 +453,57 @@ err:
 }
 EXPORT_SYMBOL_GPL(cxl_add_dport);
 
-static struct cxl_decoder *
-cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
-		  resource_size_t len, int interleave_ways,
-		  int interleave_granularity, enum cxl_decoder_type type,
-		  unsigned long flags)
+static int decoder_populate_targets(struct cxl_decoder *cxld,
+				    struct cxl_port *port, int *target_map)
 {
-	struct cxl_decoder *cxld;
+	int rc = 0, i;
+
+	if (!target_map)
+		return 0;
+
+	device_lock(&port->dev);
+	if (list_empty(&port->dports)) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < cxld->nr_targets; i++) {
+		struct cxl_dport *dport = find_dport(port, target_map[i]);
+
+		if (!dport) {
+			rc = -ENXIO;
+			goto out_unlock;
+		}
+		cxld->target[i] = dport;
+	}
+
+out_unlock:
+	device_unlock(&port->dev);
+
+	return rc;
+}
+
+struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
+{
+	struct cxl_decoder *cxld, cxld_const_init = {
+		.nr_targets = nr_targets,
+	};
 	struct device *dev;
 	int rc = 0;
 
-	if (interleave_ways < 1)
+	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1)
 		return ERR_PTR(-EINVAL);
 
-	device_lock(&port->dev);
-	if (list_empty(&port->dports))
-		rc = -EINVAL;
-	device_unlock(&port->dev);
-	if (rc)
-		return ERR_PTR(rc);
-
 	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
 	if (!cxld)
 		return ERR_PTR(-ENOMEM);
+	memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init));
 
 	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
 	if (rc < 0)
 		goto err;
 
-	*cxld = (struct cxl_decoder) {
-		.id = rc,
-		.range = {
-			.start = base,
-			.end = base + len - 1,
-		},
-		.flags = flags,
-		.interleave_ways = interleave_ways,
-		.interleave_granularity = interleave_granularity,
-		.target_type = type,
-	};
-
-	/* handle implied target_list */
-	if (interleave_ways == 1)
-		cxld->target[0] =
-			list_first_entry(&port->dports, struct cxl_dport, list);
+	cxld->id = rc;
 
 	dev = &cxld->dev;
 	device_initialize(dev);
 	device_set_pm_not_required(dev);
@@ -514,41 +521,47 @@ err:
 	kfree(cxld);
 	return ERR_PTR(rc);
 }
+EXPORT_SYMBOL_GPL(cxl_decoder_alloc);
 
-struct cxl_decoder *
-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
-		     resource_size_t base, resource_size_t len,
-		     int interleave_ways, int interleave_granularity,
-		     enum cxl_decoder_type type, unsigned long flags)
+int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
 {
-	struct cxl_decoder *cxld;
+	struct cxl_port *port;
 	struct device *dev;
 	int rc;
 
-	cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
-				 interleave_granularity, type, flags);
-	if (IS_ERR(cxld))
-		return cxld;
+	if (WARN_ON_ONCE(!cxld))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(IS_ERR(cxld)))
+		return PTR_ERR(cxld);
+
+	if (cxld->interleave_ways < 1)
+		return -EINVAL;
+
+	port = to_cxl_port(cxld->dev.parent);
+	rc = decoder_populate_targets(cxld, port, target_map);
+	if (rc)
+		return rc;
 
 	dev = &cxld->dev;
 	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
 	if (rc)
-		goto err;
+		return rc;
 
-	rc = device_add(dev);
-	if (rc)
-		goto err;
-
-	rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
-	if (rc)
-		return ERR_PTR(rc);
-	return cxld;
-
-err:
-	put_device(dev);
-	return ERR_PTR(rc);
+	return device_add(dev);
 }
-EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
+EXPORT_SYMBOL_GPL(cxl_decoder_add);
+
+static void cxld_unregister(void *dev)
+{
+	device_unregister(dev);
+}
+
+int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
+{
+	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
+}
+EXPORT_SYMBOL_GPL(cxl_decoder_autoremove);
 
 /**
  * __cxl_driver_register - register a driver for the cxl bus
@@ -635,6 +648,8 @@ static __init int cxl_core_init(void)
 {
 	int rc;
 
+	cxl_mbox_init();
+
 	rc = cxl_memdev_init();
 	if (rc)
 		return rc;
@@ -646,6 +661,7 @@ static __init int cxl_core_init(void)
 
 err:
 	cxl_memdev_exit();
+	cxl_mbox_exit();
 	return rc;
 }
 
@@ -653,6 +669,7 @@ static void cxl_core_exit(void)
 {
 	bus_unregister(&cxl_bus_type);
 	cxl_memdev_exit();
+	cxl_mbox_exit();
 }
 
 module_init(cxl_core_init);
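Taken together, the bus.c changes above replace the monolithic devm_cxl_add_decoder() with an alloc + add + autoremove sequence. A minimal sketch of a consumer, distilled from the cxl_acpi usage (single-target decoder assumed; error handling trimmed to the essentials):

/* Sketch only: mirrors the cxl_decoder_alloc()/cxl_decoder_add() contract */
static int example_add_decoder(struct device *host, struct cxl_port *port,
			       int *target_map, struct range range)
{
	struct cxl_decoder *cxld;
	int rc;

	cxld = cxl_decoder_alloc(port, 1);	/* initialized, not yet added */
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

	/* settle attributes before publishing the device */
	cxld->range = range;
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;

	rc = cxl_decoder_add(cxld, target_map);	/* validates targets, device_add() */
	if (rc) {
		put_device(&cxld->dev);		/* allocated but never added */
		return rc;
	}
	return cxl_decoder_autoremove(host, cxld);
}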
drivers/cxl/core/core.h

@@ -9,12 +9,15 @@ extern const struct device_type cxl_nvdimm_type;
 
 extern struct attribute_group cxl_base_attribute_group;
 
-static inline void unregister_cxl_dev(void *dev)
-{
-	device_unregister(dev);
-}
+struct cxl_send_command;
+struct cxl_mem_query_commands;
+int cxl_query_cmd(struct cxl_memdev *cxlmd,
+		  struct cxl_mem_query_commands __user *q);
+int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
 
 int cxl_memdev_init(void);
 void cxl_memdev_exit(void);
+void cxl_mbox_init(void);
+void cxl_mbox_exit(void);
 
 #endif /* __CXL_CORE_H__ */
drivers/cxl/core/mbox.c (new file, 787 lines)

@@ -0,0 +1,787 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &cxl_mem_commands[0];                                     \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
	.info =	{                                                              \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
	.opcode = CXL_MBOX_OP_##_id,                                           \
	.flags = _flags,                                                       \
	}

/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, ~0, ~0, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, ~0, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, ~0, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, ~0, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidates those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace can not make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}
static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

/**
 * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
 * @cxlm: The CXL memory device to communicate with.
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context. Will acquire and release mbox_mutex.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself reported an
 * error. While this distinction can be useful for commands from userspace, the
 * kernel will only be able to use results when both are successful.
 *
 * See __cxl_mem_mbox_send_cmd()
 */
int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
			  size_t in_size, void *out, size_t out_size)
{
	const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = opcode,
		.payload_in = in,
		.size_in = in_size,
		.size_out = out_size,
		.payload_out = out,
	};
	int rc;

	if (out_size > cxlm->payload_size)
		return -E2BIG;

	rc = cxlm->mbox_send(cxlm, &mbox_cmd);
	if (rc)
		return rc;

	/* TODO: Map return code to proper kernel style errno */
	if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
		return -ENXIO;

	/*
	 * Variable sized commands can't be validated and so it's up to the
	 * caller to do that if they wish.
	 */
	if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_mem_mbox_send_cmd);

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @cxlm: &struct cxl_mem device whose mailbox will be used.
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 * @out_cmd: Sanitized and populated &struct cxl_mem_command.
 *
 * Return:
 *  * %0	- @out_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode
 *
 * The result of this command is a fully validated command in @out_cmd that is
 * safe to send to the hardware.
 *
 * See handle_mailbox_cmd_from_user()
 */
static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
				      const struct cxl_send_command *send_cmd,
				      struct cxl_mem_command *out_cmd)
{
	const struct cxl_command_info *info;
	struct cxl_mem_command *c;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxlm->payload_size)
		return -EINVAL;

	/*
	 * Checks are bypassed for raw commands but a WARN/taint will occur
	 * later in the callchain
	 */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) {
		const struct cxl_mem_command temp = {
			.info = {
				.id = CXL_MEM_COMMAND_ID_RAW,
				.flags = 0,
				.size_in = send_cmd->in.size,
				.size_out = send_cmd->out.size,
			},
			.opcode = send_cmd->raw.opcode
		};

		if (send_cmd->raw.rsvd)
			return -EINVAL;

		/*
		 * Unlike supported commands, the output size of RAW commands
		 * gets passed along without further checking, so it must be
		 * validated here.
		 */
		if (send_cmd->out.size > cxlm->payload_size)
			return -EINVAL;

		if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
			return -EPERM;

		memcpy(out_cmd, &temp, sizeof(temp));

		return 0;
	}

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Convert user's command into the internal representation */
	c = &cxl_mem_commands[send_cmd->id];
	info = &c->info;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlm->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxlm->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if (info->size_out >= 0 && send_cmd->out.size < info->size_out)
		return -ENOMEM;

	memcpy(out_cmd, c, sizeof(*c));
	out_cmd->info.size_in = send_cmd->in.size;
	/*
	 * XXX: out_cmd->info.size_out will be controlled by the driver, and the
	 * specified number of bytes @send_cmd->out.size will be copied back out
	 * to userspace.
	 */

	return 0;
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return max(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlm: The CXL memory device to communicate with.
 * @cmd: The validated command.
 * @in_payload: Pointer to userspace's input payload.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Creates the appropriate mailbox command and dispatches it on behalf of a
 * userspace request. The input and output payloads are copied between
 * userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
					const struct cxl_mem_command *cmd,
					u64 in_payload, u64 out_payload,
					s32 *size_out, u32 *retval)
{
	struct device *dev = cxlm->dev;
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = cmd->opcode,
		.size_in = cmd->info.size_in,
		.size_out = cmd->info.size_out,
	};
	int rc;

	if (cmd->info.size_out) {
		mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL);
		if (!mbox_cmd.payload_out)
			return -ENOMEM;
	}

	if (cmd->info.size_in) {
		mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						   cmd->info.size_in);
		if (IS_ERR(mbox_cmd.payload_in)) {
			kvfree(mbox_cmd.payload_out);
			return PTR_ERR(mbox_cmd.payload_in);
		}
	}

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %ub\n",
		cxl_command_names[cmd->info.id].name, mbox_cmd.opcode,
		cmd->info.size_in);

	dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
		      "raw command path used\n");

	rc = cxlm->mbox_send(cxlm, &mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the payload may have written more output than
	 * this it will have to be ignored.
	 */
	if (mbox_cmd.size_out) {
		dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd.payload_out, mbox_cmd.size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd.size_out;
	*retval = mbox_cmd.return_code;

out:
	kvfree(mbox_cmd.payload_in);
	kvfree(mbox_cmd.payload_out);
	return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_mem *cxlm = cxlmd->cxlm;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mem_command c;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
	if (rc)
		return rc;

	/* Prepare to handle a full payload for variable sized output */
	if (c.info.size_out < 0)
		c.info.size_out = cxlm->payload_size;

	rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
					  send.out.payload, &send.out.size,
					  &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}

static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
		struct cxl_mbox_get_log log = {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size)
		};
		int rc;

		rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
					   sizeof(log), out, xfer_size);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlm: Device.
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(cxlm->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlm->enabled_cmds);
	}
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_supported_logs *ret;
	int rc;

	ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
				   0, ret, cxlm->payload_size);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}
enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
 * @cxlm: The device.
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlm.
 */
int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = cxlm->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlm);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlm, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlm, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlm->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}

out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);

/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlm: cxl_mem instance to update partition info
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_partition_info {
		__le64 active_volatile_cap;
		__le64 active_persistent_cap;
		__le64 next_volatile_cap;
		__le64 next_persistent_cap;
	} __packed pi;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
				   NULL, 0, &pi, sizeof(pi));

	if (rc)
		return rc;

	cxlm->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlm->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlm->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlm->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

/**
 * cxl_mem_identify() - Send the IDENTIFY command to the device.
 * @cxlm: The device to identify.
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_mem_identify(struct cxl_mem *cxlm)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
				   sizeof(id));
	if (rc < 0)
		return rc;

	cxlm->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlm->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlm->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlm->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	dev_dbg(cxlm->dev,
		"Identify Memory Device\n"
		"     total_bytes = %#llx\n"
		"     volatile_only_bytes = %#llx\n"
		"     persistent_only_bytes = %#llx\n"
		"     partition_align_bytes = %#llx\n",
		cxlm->total_bytes, cxlm->volatile_only_bytes,
		cxlm->persistent_only_bytes, cxlm->partition_align_bytes);

	cxlm->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_mem_identify);

int cxl_mem_create_range_info(struct cxl_mem *cxlm)
{
	int rc;

	if (cxlm->partition_align_bytes == 0) {
		cxlm->ram_range.start = 0;
		cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
		cxlm->pmem_range.start = cxlm->volatile_only_bytes;
		cxlm->pmem_range.end = cxlm->volatile_only_bytes +
				       cxlm->persistent_only_bytes - 1;
		return 0;
	}

	rc = cxl_mem_get_partition_info(cxlm);
	if (rc) {
		dev_err(cxlm->dev, "Failed to query partition information\n");
		return rc;
	}

	dev_dbg(cxlm->dev,
		"Get Partition Info\n"
		"     active_volatile_bytes = %#llx\n"
		"     active_persistent_bytes = %#llx\n"
		"     next_volatile_bytes = %#llx\n"
		"     next_persistent_bytes = %#llx\n",
		cxlm->active_volatile_bytes, cxlm->active_persistent_bytes,
		cxlm->next_volatile_bytes, cxlm->next_persistent_bytes);

	cxlm->ram_range.start = 0;
	cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;

	cxlm->pmem_range.start = cxlm->active_volatile_bytes;
	cxlm->pmem_range.end =
		cxlm->active_volatile_bytes + cxlm->active_persistent_bytes - 1;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_mem_create_range_info);

struct cxl_mem *cxl_mem_create(struct device *dev)
{
	struct cxl_mem *cxlm;

	cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
	if (!cxlm) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlm->mbox_mutex);
	cxlm->dev = dev;

	return cxlm;
}
EXPORT_SYMBOL_GPL(cxl_mem_create);

static struct dentry *cxl_debugfs;

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);
	mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}

void cxl_mbox_exit(void)
{
	debugfs_remove_recursive(cxl_debugfs);
}
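For orientation, the cxl_query_cmd() / cxl_send_cmd() entry points above back the CXL_MEM_QUERY_COMMANDS and CXL_MEM_SEND_COMMAND ioctls on the memdev character device. A hedged userspace sketch of the two-pass query protocol (the "/dev/cxl/mem0" path assumes a single enumerated memdev):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>

int main(void)
{
	struct cxl_mem_query_commands *query;
	__u32 n = 0;
	int fd, rc;

	fd = open("/dev/cxl/mem0", O_RDWR);
	if (fd < 0)
		return 1;

	/* Pass 1: n_commands == 0 asks the kernel for the total count */
	query = calloc(1, sizeof(*query));
	if (!query)
		return 1;
	rc = ioctl(fd, CXL_MEM_QUERY_COMMANDS, query);
	if (rc == 0)
		n = query->n_commands;
	free(query);

	/* Pass 2: retrieve that many cxl_command_info records */
	query = calloc(1, sizeof(*query) + n * sizeof(query->commands[0]));
	if (!query)
		return 1;
	query->n_commands = n;
	rc = ioctl(fd, CXL_MEM_QUERY_COMMANDS, query);
	if (rc == 0)
		printf("%u commands enumerated\n", n);

	free(query);
	close(fd);
	return rc ? 1 : 0;
}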
drivers/cxl/core/memdev.c

@@ -8,6 +8,8 @@
 #include <cxlmem.h>
 #include "core.h"
 
+static DECLARE_RWSEM(cxl_memdev_rwsem);
+
 /*
  * An entire PCI topology full of devices should be enough for any
  * config
@@ -132,16 +134,53 @@ static const struct device_type cxl_memdev_type = {
 	.groups = cxl_memdev_attribute_groups,
 };
 
+/**
+ * set_exclusive_cxl_commands() - atomically disable user cxl commands
+ * @cxlm: cxl_mem instance to modify
+ * @cmds: bitmap of commands to mark exclusive
+ *
+ * Grab the cxl_memdev_rwsem in write mode to flush in-flight
+ * invocations of the ioctl path and then disable future execution of
+ * commands with the command ids set in @cmds.
+ */
+void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+{
+	down_write(&cxl_memdev_rwsem);
+	bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+		  CXL_MEM_COMMAND_ID_MAX);
+	up_write(&cxl_memdev_rwsem);
+}
+EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands);
+
+/**
+ * clear_exclusive_cxl_commands() - atomically enable user cxl commands
+ * @cxlm: cxl_mem instance to modify
+ * @cmds: bitmap of commands to mark available for userspace
+ */
+void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+{
+	down_write(&cxl_memdev_rwsem);
+	bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+		      CXL_MEM_COMMAND_ID_MAX);
+	up_write(&cxl_memdev_rwsem);
+}
+EXPORT_SYMBOL_GPL(clear_exclusive_cxl_commands);
+
+static void cxl_memdev_shutdown(struct device *dev)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+	down_write(&cxl_memdev_rwsem);
+	cxlmd->cxlm = NULL;
+	up_write(&cxl_memdev_rwsem);
+}
+
 static void cxl_memdev_unregister(void *_cxlmd)
 {
 	struct cxl_memdev *cxlmd = _cxlmd;
 	struct device *dev = &cxlmd->dev;
-	struct cdev *cdev = &cxlmd->cdev;
-	const struct cdevm_file_operations *cdevm_fops;
-
-	cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
-	cdevm_fops->shutdown(dev);
 
+	cxl_memdev_shutdown(dev);
 	cdev_device_del(&cxlmd->cdev, dev);
 	put_device(dev);
 }
@@ -149,7 +188,6 @@ static void cxl_memdev_unregister(void *_cxlmd)
 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
 					   const struct file_operations *fops)
 {
-	struct pci_dev *pdev = cxlm->pdev;
 	struct cxl_memdev *cxlmd;
 	struct device *dev;
 	struct cdev *cdev;
@@ -166,7 +204,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
 
 	dev = &cxlmd->dev;
 	device_initialize(dev);
-	dev->parent = &pdev->dev;
+	dev->parent = cxlm->dev;
 	dev->bus = &cxl_bus_type;
 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
 	dev->type = &cxl_memdev_type;
@@ -181,16 +219,72 @@ err:
 	return ERR_PTR(rc);
 }
 
+static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
+			       unsigned long arg)
+{
+	switch (cmd) {
+	case CXL_MEM_QUERY_COMMANDS:
+		return cxl_query_cmd(cxlmd, (void __user *)arg);
+	case CXL_MEM_SEND_COMMAND:
+		return cxl_send_cmd(cxlmd, (void __user *)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	struct cxl_memdev *cxlmd = file->private_data;
+	int rc = -ENXIO;
+
+	down_read(&cxl_memdev_rwsem);
+	if (cxlmd->cxlm)
+		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+	up_read(&cxl_memdev_rwsem);
+
+	return rc;
+}
+
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+	struct cxl_memdev *cxlmd =
+		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+	get_device(&cxlmd->dev);
+	file->private_data = cxlmd;
+
+	return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+	put_device(&cxlmd->dev);
+
+	return 0;
+}
+
+static const struct file_operations cxl_memdev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = cxl_memdev_ioctl,
+	.open = cxl_memdev_open,
+	.release = cxl_memdev_release_file,
+	.compat_ioctl = compat_ptr_ioctl,
+	.llseek = noop_llseek,
+};
+
-struct cxl_memdev *
-devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
-		    const struct cdevm_file_operations *cdevm_fops)
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm)
 {
 	struct cxl_memdev *cxlmd;
 	struct device *dev;
 	struct cdev *cdev;
 	int rc;
 
-	cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
+	cxlmd = cxl_memdev_alloc(cxlm, &cxl_memdev_fops);
 	if (IS_ERR(cxlmd))
 		return cxlmd;
 
@@ -210,7 +304,7 @@ devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
 	if (rc)
 		goto err;
 
-	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+	rc = devm_add_action_or_reset(cxlm->dev, cxl_memdev_unregister, cxlmd);
 	if (rc)
 		return ERR_PTR(rc);
 	return cxlmd;
@@ -220,7 +314,7 @@ err:
 	 * The cdev was briefly live, shutdown any ioctl operations that
 	 * saw that state.
 	 */
-	cdevm_fops->shutdown(dev);
+	cxl_memdev_shutdown(dev);
 	put_device(dev);
 	return ERR_PTR(rc);
 }
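The set_exclusive_cxl_commands() / clear_exclusive_cxl_commands() pair above is how an in-kernel consumer fences the ioctl path away from device state it owns; per cxl_validate_cmd_from_user(), a command id marked exclusive fails with -EBUSY. A sketch of a consumer (the SET_LSA choice is illustrative of the label-caching rationale documented in mbox.c, not a copy of the cxl_pmem driver):

#include <linux/bitmap.h>
#include <cxlmem.h>	/* set_exclusive_cxl_commands(), command ids */

/* Sketch: commands this driver owns for the duration of a bind */
static DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

static int example_bind(struct cxl_mem *cxlm)
{
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
	/* after this, the ioctl path returns -EBUSY for SET_LSA */
	set_exclusive_cxl_commands(cxlm, exclusive_cmds);
	return 0;
}

static void example_unbind(struct cxl_mem *cxlm)
{
	/* restore userspace access on teardown */
	clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
}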
drivers/cxl/core/pmem.c

@@ -2,6 +2,7 @@
 /* Copyright(c) 2020 Intel Corporation. */
 #include <linux/device.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <cxlmem.h>
 #include <cxl.h>
 #include "core.h"
@@ -20,10 +21,13 @@
 * operations, for example, namespace label access commands.
 */
 
+static DEFINE_IDA(cxl_nvdimm_bridge_ida);
+
 static void cxl_nvdimm_bridge_release(struct device *dev)
 {
 	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
 
+	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
 	kfree(cxl_nvb);
 }
 
@@ -47,16 +51,38 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
 
+__mock int match_nvdimm_bridge(struct device *dev, const void *data)
+{
+	return dev->type == &cxl_nvdimm_bridge_type;
+}
+
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&cxl_bus_type, NULL, cxl_nvd, match_nvdimm_bridge);
+	if (!dev)
+		return NULL;
+	return to_cxl_nvdimm_bridge(dev);
+}
+EXPORT_SYMBOL_GPL(cxl_find_nvdimm_bridge);
+
 static struct cxl_nvdimm_bridge *
 cxl_nvdimm_bridge_alloc(struct cxl_port *port)
 {
 	struct cxl_nvdimm_bridge *cxl_nvb;
 	struct device *dev;
+	int rc;
 
 	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
 	if (!cxl_nvb)
 		return ERR_PTR(-ENOMEM);
 
+	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
+	if (rc < 0)
+		goto err;
+	cxl_nvb->id = rc;
+
 	dev = &cxl_nvb->dev;
 	cxl_nvb->port = port;
 	cxl_nvb->state = CXL_NVB_NEW;
@@ -67,6 +93,10 @@ cxl_nvdimm_bridge_alloc(struct cxl_port *port)
 	dev->type = &cxl_nvdimm_bridge_type;
 
 	return cxl_nvb;
+
+err:
+	kfree(cxl_nvb);
+	return ERR_PTR(rc);
 }
 
 static void unregister_nvb(void *_cxl_nvb)
@@ -119,7 +149,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
 		return cxl_nvb;
 
 	dev = &cxl_nvb->dev;
-	rc = dev_set_name(dev, "nvdimm-bridge");
+	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
 	if (rc)
 		goto err;
 
@@ -192,6 +222,11 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
 	return cxl_nvd;
 }
 
+static void cxl_nvd_unregister(void *dev)
+{
+	device_unregister(dev);
+}
+
 /**
  * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
  * @host: same host as @cxlmd
@@ -221,7 +256,7 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
 	dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
 		dev_name(dev));
 
-	return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+	return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);
 
 err:
 	put_device(dev);
@@ -114,7 +114,17 @@ struct cxl_device_reg_map {
 	struct cxl_reg_map memdev;
 };
 
+/**
+ * struct cxl_register_map - DVSEC harvested register block mapping parameters
+ * @base: virtual base of the register-block-BAR + @block_offset
+ * @block_offset: offset to start of register block in @barno
+ * @reg_type: see enum cxl_regloc_type
+ * @barno: PCI BAR number containing the register block
+ * @component_map: cxl_reg_map for component registers
+ * @device_map: cxl_reg_maps for device registers
+ */
 struct cxl_register_map {
+	void __iomem *base;
 	u64 block_offset;
 	u8 reg_type;
 	u8 barno;
@@ -155,6 +165,12 @@ enum cxl_decoder_type {
 	CXL_DECODER_EXPANDER = 3,
 };
 
+/*
+ * Current specification goes up to 8, double that seems a reasonable
+ * software max for the foreseeable future
+ */
+#define CXL_DECODER_MAX_INTERLEAVE 16
+
 /**
  * struct cxl_decoder - CXL address range decode configuration
  * @dev: this decoder's device
@@ -164,6 +180,7 @@ enum cxl_decoder_type {
  * @interleave_granularity: data stride per dport
  * @target_type: accelerator vs expander (type2 vs type3) selector
  * @flags: memory type capabilities and locking
+ * @nr_targets: number of elements in @target
  * @target: active ordered target list in current decoder configuration
  */
 struct cxl_decoder {
@@ -174,6 +191,7 @@ struct cxl_decoder {
 	int interleave_granularity;
 	enum cxl_decoder_type target_type;
 	unsigned long flags;
+	const int nr_targets;
 	struct cxl_dport *target[];
 };
 
@@ -186,6 +204,7 @@ enum cxl_nvdimm_brige_state {
 };
 
 struct cxl_nvdimm_bridge {
+	int id;
 	struct device dev;
 	struct cxl_port *port;
 	struct nvdimm_bus *nvdimm_bus;
@@ -200,6 +219,14 @@ struct cxl_nvdimm {
 	struct nvdimm *nvdimm;
 };
 
+struct cxl_walk_context {
+	struct device *dev;
+	struct pci_bus *root;
+	struct cxl_port *port;
+	int error;
+	int count;
+};
+
 /**
  * struct cxl_port - logical collection of upstream port devices and
  *		     downstream port devices to construct a CXL memory
@@ -246,25 +273,9 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id,
 
 struct cxl_decoder *to_cxl_decoder(struct device *dev);
 bool is_root_decoder(struct device *dev);
-struct cxl_decoder *
-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
-		     resource_size_t base, resource_size_t len,
-		     int interleave_ways, int interleave_granularity,
-		     enum cxl_decoder_type type, unsigned long flags);
-
-/*
- * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
- * single ported host-bridges need not publish a decoder capability when a
- * passthrough decode can be assumed, i.e. all transactions that the uport sees
- * are claimed and passed to the single dport. Default the range a 0-base
- * 0-length until the first CXL region is activated.
- */
-static inline struct cxl_decoder *
-devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port)
-{
-	return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
-				    CXL_DECODER_EXPANDER, 0);
-}
+struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets);
+int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
 
 extern struct bus_type cxl_bus_type;
 
@@ -298,4 +309,13 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
 bool is_cxl_nvdimm(struct device *dev);
 int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
+
+/*
+ * Unit test builds overrides this to __weak, find the 'strong' version
+ * of these symbols in tools/testing/cxl/.
+ */
+#ifndef __mock
+#define __mock static
+#endif
 
 #endif /* __CXL_H__ */
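
Aside: a hedged sketch of how a port driver consumes the alloc + add split declared above; the target_map contents and the put_device() on failure are assumptions, not lines from this series:

	struct cxl_decoder *cxld;
	int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
	int rc;

	cxld = cxl_decoder_alloc(port, 1);	/* 1 == nr_targets */
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);
	rc = cxl_decoder_add(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);		/* assumed cleanup path */
		return rc;
	}
	return cxl_decoder_autoremove(host, cxld);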
@@ -2,6 +2,7 @@
 /* Copyright(c) 2020-2021 Intel Corporation. */
 #ifndef __CXL_MEM_H__
 #define __CXL_MEM_H__
 #include <uapi/linux/cxl_mem.h>
+#include <linux/cdev.h>
 #include "cxl.h"
 
@@ -28,21 +29,6 @@
 	 (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
 	  CXLMDEV_RESET_NEEDED_NOT)
 
-/**
- * struct cdevm_file_operations - devm coordinated cdev file operations
- * @fops: file operations that are synchronized against @shutdown
- * @shutdown: disconnect driver data
- *
- * @shutdown is invoked in the devres release path to disconnect any
- * driver instance data from @dev. It assumes synchronization with any
- * fops operation that requires driver data. After @shutdown an
- * operation may only reference @device data.
- */
-struct cdevm_file_operations {
-	struct file_operations fops;
-	void (*shutdown)(struct device *dev);
-};
-
 /**
  * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
  * @dev: driver core device object
@@ -62,13 +48,50 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
 	return container_of(dev, struct cxl_memdev, dev);
 }
 
-struct cxl_memdev *
-devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
-		    const struct cdevm_file_operations *cdevm_fops);
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm);
+
+/**
+ * struct cxl_mbox_cmd - A command to be submitted to hardware.
+ * @opcode: (input) The command set and command submitted to hardware.
+ * @payload_in: (input) Pointer to the input payload.
+ * @payload_out: (output) Pointer to the output payload. Must be allocated by
+ *		 the caller.
+ * @size_in: (input) Number of bytes to load from @payload_in.
+ * @size_out: (input) Max number of bytes loaded into @payload_out.
+ *	      (output) Number of bytes generated by the device. For fixed size
+ *	      outputs commands this is always expected to be deterministic. For
+ *	      variable sized output commands, it tells the exact number of bytes
+ *	      written.
+ * @return_code: (output) Error code returned from hardware.
+ *
+ * This is the primary mechanism used to send commands to the hardware.
+ * All the fields except @payload_* correspond exactly to the fields described in
+ * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and
+ * @payload_out are written to, and read from the Command Payload Registers
+ * defined in CXL 2.0 8.2.8.4.8.
+ */
+struct cxl_mbox_cmd {
+	u16 opcode;
+	void *payload_in;
+	void *payload_out;
+	size_t size_in;
+	size_t size_out;
+	u16 return_code;
+#define CXL_MBOX_SUCCESS 0
+};
+
+/*
+ * CXL 2.0 - Memory capacity multiplier
+ * See Section 8.2.9.5
+ *
+ * Volatile, Persistent, and Partition capacities are specified to be in
+ * multiples of 256MB - define a multiplier to convert to/from bytes.
+ */
+#define CXL_CAPACITY_MULTIPLIER SZ_256M
+
 /**
  * struct cxl_mem - A CXL memory device
  * @pdev: The PCI device associated with this CXL device.
  * @dev: The device associated with this CXL device.
  * @cxlmd: Logical memory device chardev / interface
  * @regs: Parsed register blocks
  * @payload_size: Size of space for payload
@@ -78,11 +101,24 @@ devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
  * @mbox_mutex: Mutex to synchronize mailbox access.
  * @firmware_version: Firmware version for the memory device.
  * @enabled_cmds: Hardware commands found enabled in CEL.
- * @pmem_range: Persistent memory capacity information.
- * @ram_range: Volatile memory capacity information.
+ * @exclusive_cmds: Commands that are kernel-internal only
+ * @pmem_range: Active Persistent memory capacity configuration
+ * @ram_range: Active Volatile memory capacity configuration
+ * @total_bytes: sum of all possible capacities
+ * @volatile_only_bytes: hard volatile capacity
+ * @persistent_only_bytes: hard persistent capacity
+ * @partition_align_bytes: alignment size for partition-able capacity
+ * @active_volatile_bytes: sum of hard + soft volatile
+ * @active_persistent_bytes: sum of hard + soft persistent
+ * @next_volatile_bytes: volatile capacity change pending device reset
+ * @next_persistent_bytes: persistent capacity change pending device reset
+ * @mbox_send: @dev specific transport for transmitting mailbox commands
+ *
+ * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
+ * details on capacity parameters.
  */
 struct cxl_mem {
 	struct pci_dev *pdev;
 	struct device *dev;
 	struct cxl_memdev *cxlmd;
 
 	struct cxl_regs regs;
@@ -91,7 +127,8 @@ struct cxl_mem {
 	size_t lsa_size;
 	struct mutex mbox_mutex; /* Protects device mailbox and firmware */
 	char firmware_version[0x10];
-	unsigned long *enabled_cmds;
+	DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
+	DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 
 	struct range pmem_range;
 	struct range ram_range;
@@ -104,5 +141,124 @@ struct cxl_mem {
 	u64 active_persistent_bytes;
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
+
+	int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd);
 };
+
+enum cxl_opcode {
+	CXL_MBOX_OP_INVALID = 0x0000,
+	CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
+	CXL_MBOX_OP_GET_FW_INFO = 0x0200,
+	CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
+	CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
+	CXL_MBOX_OP_GET_LOG = 0x0401,
+	CXL_MBOX_OP_IDENTIFY = 0x4000,
+	CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
+	CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
+	CXL_MBOX_OP_GET_LSA = 0x4102,
+	CXL_MBOX_OP_SET_LSA = 0x4103,
+	CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200,
+	CXL_MBOX_OP_GET_ALERT_CONFIG = 0x4201,
+	CXL_MBOX_OP_SET_ALERT_CONFIG = 0x4202,
+	CXL_MBOX_OP_GET_SHUTDOWN_STATE = 0x4203,
+	CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204,
+	CXL_MBOX_OP_GET_POISON = 0x4300,
+	CXL_MBOX_OP_INJECT_POISON = 0x4301,
+	CXL_MBOX_OP_CLEAR_POISON = 0x4302,
+	CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
+	CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
+	CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
+	CXL_MBOX_OP_MAX = 0x10000
+};
+
+#define DEFINE_CXL_CEL_UUID \
+	UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, 0xb1, 0x62, \
+		  0x3b, 0x3f, 0x17)
+
+#define DEFINE_CXL_VENDOR_DEBUG_UUID \
+	UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \
+		  0x40, 0x3d, 0x86)
+
+struct cxl_mbox_get_supported_logs {
+	__le16 entries;
+	u8 rsvd[6];
+	struct cxl_gsl_entry {
+		uuid_t uuid;
+		__le32 size;
+	} __packed entry[];
+} __packed;
+
+struct cxl_cel_entry {
+	__le16 opcode;
+	__le16 effect;
+} __packed;
+
+struct cxl_mbox_get_log {
+	uuid_t uuid;
+	__le32 offset;
+	__le32 length;
+} __packed;
+
+/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
+struct cxl_mbox_identify {
+	char fw_revision[0x10];
+	__le64 total_capacity;
+	__le64 volatile_capacity;
+	__le64 persistent_capacity;
+	__le64 partition_align;
+	__le16 info_event_log_size;
+	__le16 warning_event_log_size;
+	__le16 failure_event_log_size;
+	__le16 fatal_event_log_size;
+	__le32 lsa_size;
+	u8 poison_list_max_mer[3];
+	__le16 inject_poison_limit;
+	u8 poison_caps;
+	u8 qos_telemetry_caps;
+} __packed;
+
+struct cxl_mbox_get_lsa {
+	u32 offset;
+	u32 length;
+} __packed;
+
+struct cxl_mbox_set_lsa {
+	u32 offset;
+	u32 reserved;
+	u8 data[];
+} __packed;
+
+/**
+ * struct cxl_mem_command - Driver representation of a memory device command
+ * @info: Command information as it exists for the UAPI
+ * @opcode: The actual bits used for the mailbox protocol
+ * @flags: Set of flags effecting driver behavior.
+ *
+ *  * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
+ *    will be enabled by the driver regardless of what hardware may have
+ *    advertised.
+ *
+ * The cxl_mem_command is the driver's internal representation of commands that
+ * are supported by the driver. Some of these commands may not be supported by
+ * the hardware. The driver will use @info to validate the fields passed in by
+ * the user then submit the @opcode to the hardware.
+ *
+ * See struct cxl_command_info.
+ */
+struct cxl_mem_command {
+	struct cxl_command_info info;
+	enum cxl_opcode opcode;
+	u32 flags;
+#define CXL_CMD_FLAG_NONE 0
+#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
+};
+
+int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
+			  size_t in_size, void *out, size_t out_size);
+int cxl_mem_identify(struct cxl_mem *cxlm);
+int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm);
+int cxl_mem_create_range_info(struct cxl_mem *cxlm);
+struct cxl_mem *cxl_mem_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
 #endif /* __CXL_MEM_H__ */
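
Aside: a hedged usage sketch of the mailbox entry points declared above, an Identify round trip plus the 256MB capacity conversion; the field handling shown is an assumption, not a line from this series:

	struct cxl_mbox_identify id;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0,
				   &id, sizeof(id));
	if (rc < 0)
		return rc;

	/* capacities are reported in 256MB units (CXL 2.0 8.2.9.5) */
	cxlm->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlm->lsa_size = le32_to_cpu(id.lsa_size);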
drivers/cxl/pci.c (1242 lines changed): diff suppressed because it is too large.
@@ -20,13 +20,15 @@
 #define CXL_REGLOC_BIR_MASK GENMASK(2, 0)
 
 /* Register Block Identifier (RBI) */
-#define CXL_REGLOC_RBI_MASK GENMASK(15, 8)
-#define CXL_REGLOC_RBI_EMPTY 0
-#define CXL_REGLOC_RBI_COMPONENT 1
-#define CXL_REGLOC_RBI_VIRT 2
-#define CXL_REGLOC_RBI_MEMDEV 3
-#define CXL_REGLOC_RBI_TYPES CXL_REGLOC_RBI_MEMDEV + 1
+enum cxl_regloc_type {
+	CXL_REGLOC_RBI_EMPTY = 0,
+	CXL_REGLOC_RBI_COMPONENT,
+	CXL_REGLOC_RBI_VIRT,
+	CXL_REGLOC_RBI_MEMDEV,
+	CXL_REGLOC_RBI_TYPES
+};
 
+#define CXL_REGLOC_RBI_MASK GENMASK(15, 8)
 #define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
 
 #endif /* __CXL_PCI_H__ */
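
Aside: a hedged sketch of decoding one 64-bit register locator entry with the masks above; the config-space offset variable (regloc) and the read sequence are assumptions:

	u32 reg_lo, reg_hi;
	u64 offset;
	u8 barno;
	enum cxl_regloc_type type;

	pci_read_config_dword(pdev, regloc, &reg_lo);
	pci_read_config_dword(pdev, regloc + 4, &reg_hi);

	barno = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
	type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
	offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
	if (type >= CXL_REGLOC_RBI_TYPES)
		return;	/* unknown register block, skip */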
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
 #include <linux/libnvdimm.h>
+#include <asm/unaligned.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/ndctl.h>
@@ -16,48 +17,55 @@
  */
 static struct workqueue_struct *cxl_pmem_wq;
 
+static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+
+static void clear_exclusive(void *cxlm)
+{
+	clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
+}
+
 static void unregister_nvdimm(void *nvdimm)
 {
 	nvdimm_delete(nvdimm);
 }
 
-static int match_nvdimm_bridge(struct device *dev, const void *data)
-{
-	return strcmp(dev_name(dev), "nvdimm-bridge") == 0;
-}
-
-static struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void)
-{
-	struct device *dev;
-
-	dev = bus_find_device(&cxl_bus_type, NULL, NULL, match_nvdimm_bridge);
-	if (!dev)
-		return NULL;
-	return to_cxl_nvdimm_bridge(dev);
-}
-
 static int cxl_nvdimm_probe(struct device *dev)
 {
 	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+	unsigned long flags = 0, cmd_mask = 0;
+	struct cxl_mem *cxlm = cxlmd->cxlm;
 	struct cxl_nvdimm_bridge *cxl_nvb;
-	unsigned long flags = 0;
 	struct nvdimm *nvdimm;
-	int rc = -ENXIO;
+	int rc;
 
-	cxl_nvb = cxl_find_nvdimm_bridge();
+	cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
 	if (!cxl_nvb)
 		return -ENXIO;
 
 	device_lock(&cxl_nvb->dev);
-	if (!cxl_nvb->nvdimm_bus)
+	if (!cxl_nvb->nvdimm_bus) {
+		rc = -ENXIO;
 		goto out;
+	}
+
+	set_exclusive_cxl_commands(cxlm, exclusive_cmds);
+	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
+	if (rc)
+		goto out;
 
 	set_bit(NDD_LABELING, &flags);
-	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0,
-			       NULL);
-	if (!nvdimm)
+	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
+			       cmd_mask, 0, NULL);
+	if (!nvdimm) {
+		rc = -ENOMEM;
 		goto out;
+	}
 
 	dev_set_drvdata(dev, nvdimm);
 	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
 out:
 	device_unlock(&cxl_nvb->dev);
@@ -72,11 +80,120 @@ static struct cxl_driver cxl_nvdimm_driver = {
 	.id = CXL_DEVICE_NVDIMM,
 };
 
+static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
+				    struct nd_cmd_get_config_size *cmd,
+				    unsigned int buf_len)
+{
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+
+	*cmd = (struct nd_cmd_get_config_size) {
+		 .config_size = cxlm->lsa_size,
+		 .max_xfer = cxlm->payload_size,
+	};
+
+	return 0;
+}
+
+static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
+				    struct nd_cmd_get_config_data_hdr *cmd,
+				    unsigned int buf_len)
+{
+	struct cxl_mbox_get_lsa get_lsa;
+	int rc;
+
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
+		return -EINVAL;
+
+	get_lsa = (struct cxl_mbox_get_lsa) {
+		.offset = cmd->in_offset,
+		.length = cmd->in_length,
+	};
+
+	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
+				   sizeof(get_lsa), cmd->out_buf,
+				   cmd->in_length);
+	cmd->status = 0;
+
+	return rc;
+}
+
+static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
+				    struct nd_cmd_set_config_hdr *cmd,
+				    unsigned int buf_len)
+{
+	struct cxl_mbox_set_lsa *set_lsa;
+	int rc;
+
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+
+	/* 4-byte status follows the input data in the payload */
+	if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
+		return -EINVAL;
+
+	set_lsa =
+		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
+	if (!set_lsa)
+		return -ENOMEM;
+
+	*set_lsa = (struct cxl_mbox_set_lsa) {
+		.offset = cmd->in_offset,
+	};
+	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
+
+	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
+				   struct_size(set_lsa, data, cmd->in_length),
+				   NULL, 0);
+
+	/*
+	 * Set "firmware" status (4-packed bytes at the end of the input
+	 * payload.
+	 */
+	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
+	kvfree(set_lsa);
+
+	return rc;
+}
+
+static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
+			       void *buf, unsigned int buf_len)
+{
+	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
+	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+	struct cxl_mem *cxlm = cxlmd->cxlm;
+
+	if (!test_bit(cmd, &cmd_mask))
+		return -ENOTTY;
+
+	switch (cmd) {
+	case ND_CMD_GET_CONFIG_SIZE:
+		return cxl_pmem_get_config_size(cxlm, buf, buf_len);
+	case ND_CMD_GET_CONFIG_DATA:
+		return cxl_pmem_get_config_data(cxlm, buf, buf_len);
+	case ND_CMD_SET_CONFIG_DATA:
+		return cxl_pmem_set_config_data(cxlm, buf, buf_len);
+	default:
+		return -ENOTTY;
+	}
+}
+
 static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
 			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 			unsigned int buf_len, int *cmd_rc)
 {
-	return -ENOTTY;
+	/*
+	 * No firmware response to translate, let the transport error
+	 * code take precedence.
+	 */
+	*cmd_rc = 0;
+
+	if (!nvdimm)
+		return -ENOTTY;
+	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
 }
 
 static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
@@ -194,6 +311,10 @@ static __init int cxl_pmem_init(void)
 {
 	int rc;
 
+	set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds);
+	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
+	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
+
 	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
 	if (!cxl_pmem_wq)
 		return -ENXIO;
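
Aside: the exclusive-command handshake above, restated as a hedged sketch; the -EBUSY behavior for raw user submissions is how the mechanism is expected to work, not a line from this diff:

	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
	set_exclusive_cxl_commands(cxlm, exclusive_cmds);
	/* user ioctl submissions of SET_LSA are now expected to fail -EBUSY */
	/* ... kernel-owned label writes go via cxl_mem_mbox_send_cmd() ... */
	clear_exclusive_cxl_commands(cxlm, exclusive_cmds);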
@@ -33,18 +33,7 @@
 
 static int find_dvsec(struct pci_dev *dev, int dvsec_id)
 {
-	int vsec = 0;
-	u16 vendor, id;
-
-	while ((vsec = pci_find_next_ext_capability(dev, vsec,
-						    OCXL_EXT_CAP_ID_DVSEC))) {
-		pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET,
-				     &vendor);
-		pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id);
-		if (vendor == PCI_VENDOR_ID_IBM && id == dvsec_id)
-			return vsec;
-	}
-	return 0;
+	return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, dvsec_id);
 }
 
 static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
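
Aside: the same helper serves the CXL driver; a hedged call sketch with illustrative values (vendor 0x1e98 and DVSEC id 8 are the CXL 2.0 register locator, not taken from this hunk):

	int pos = pci_find_dvsec_capability(pdev, 0x1e98, 8);
	if (!pos)
		dev_dbg(&pdev->dev, "no register locator DVSEC\n");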
@@ -973,7 +973,7 @@ static int btt_arena_write_layout(struct arena_info *arena)
 	u64 sum;
 	struct btt_sb *super;
 	struct nd_btt *nd_btt = arena->nd_btt;
-	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
 
 	ret = btt_map_init(arena);
 	if (ret)
@@ -988,8 +988,8 @@ static int btt_arena_write_layout(struct arena_info *arena)
 		return -ENOMEM;
 
 	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
-	memcpy(super->uuid, nd_btt->uuid, 16);
-	memcpy(super->parent_uuid, parent_uuid, 16);
+	export_uuid(super->uuid, nd_btt->uuid);
+	export_uuid(super->parent_uuid, parent_uuid);
 	super->flags = cpu_to_le32(arena->flags);
 	super->version_major = cpu_to_le16(arena->version_major);
 	super->version_minor = cpu_to_le16(arena->version_minor);
@@ -1574,7 +1574,8 @@ static void btt_blk_cleanup(struct btt *btt)
  * Pointer to a new struct btt on success, NULL on failure.
  */
 static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
-			    u32 lbasize, u8 *uuid, struct nd_region *nd_region)
+			    u32 lbasize, uuid_t *uuid,
+			    struct nd_region *nd_region)
 {
 	int ret;
 	struct btt *btt;
@@ -1693,7 +1694,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
 	}
 	nd_region = to_nd_region(nd_btt->dev.parent);
 	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
-			nd_region);
+		       nd_region);
 	if (!btt)
 		return -ENOMEM;
 	nd_btt->btt = btt;
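
Aside: the uuid_t conversion helpers used above, in one self-contained sketch; `raw` stands in for a 16-byte on-media field:

	#include <linux/uuid.h>

	uuid_t uuid;
	u8 raw[UUID_SIZE];

	import_uuid(&uuid, raw);	/* on-media byte array -> uuid_t */
	export_uuid(raw, &uuid);	/* uuid_t -> on-media byte array */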
@@ -180,8 +180,8 @@ bool is_nd_btt(struct device *dev)
 EXPORT_SYMBOL(is_nd_btt);
 
 static struct device *__nd_btt_create(struct nd_region *nd_region,
-				      unsigned long lbasize, u8 *uuid,
-				      struct nd_namespace_common *ndns)
+				      unsigned long lbasize, uuid_t *uuid,
+				      struct nd_namespace_common *ndns)
 {
 	struct nd_btt *nd_btt;
 	struct device *dev;
@@ -244,14 +244,16 @@ struct device *nd_btt_create(struct nd_region *nd_region)
  */
 bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
 {
-	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+	const uuid_t *ns_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+	uuid_t parent_uuid;
 	u64 checksum;
 
 	if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
 		return false;
 
-	if (!guid_is_null((guid_t *)&super->parent_uuid))
-		if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
+	import_uuid(&parent_uuid, super->parent_uuid);
+	if (!uuid_is_null(&parent_uuid))
+		if (!uuid_equal(&parent_uuid, ns_uuid))
 			return false;
 
 	checksum = le64_to_cpu(super->checksum);
@@ -319,7 +321,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
 		return rc;
 
 	nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize);
-	nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL);
+	nd_btt->uuid = kmemdup(&btt_sb->uuid, sizeof(uuid_t), GFP_KERNEL);
 	if (!nd_btt->uuid)
 		return -ENOMEM;
 
@@ -207,38 +207,6 @@ struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
 }
 EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
 
-static bool is_uuid_sep(char sep)
-{
-	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
-		return true;
-	return false;
-}
-
-static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
-			 size_t len)
-{
-	const char *str = buf;
-	u8 uuid[16];
-	int i;
-
-	for (i = 0; i < 16; i++) {
-		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
-			dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
-				i, str - buf, str[0],
-				str + 1 - buf, str[1]);
-			return -EINVAL;
-		}
-
-		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
-		str += 2;
-		if (is_uuid_sep(*str))
-			str++;
-	}
-
-	memcpy(uuid_out, uuid, sizeof(uuid));
-	return 0;
-}
-
 /**
  * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
  * @dev: container device for the uuid property
@@ -249,21 +217,21 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
  * (driver detached)
  * LOCKING: expects nd_device_lock() is held on entry
 */
-int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
+int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
 		  size_t len)
 {
-	u8 uuid[16];
+	uuid_t uuid;
 	int rc;
 
 	if (dev->driver)
 		return -EBUSY;
 
-	rc = nd_uuid_parse(dev, uuid, buf, len);
+	rc = uuid_parse(buf, &uuid);
 	if (rc)
 		return rc;
 
 	kfree(*uuid_out);
-	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+	*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
 	if (!(*uuid_out))
 		return -ENOMEM;
 
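
Aside: uuid_parse() accepts only the canonical 36-character "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" form, so the old parser's tolerance for ':' and '\n' separators is gone; a short sketch:

	uuid_t uuid;
	int rc = uuid_parse("97a86d9c-3cdd-4eda-986f-5068b4f80088", &uuid);
	if (rc)
		return rc;	/* -EINVAL on malformed input */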
@@ -17,6 +17,14 @@ static guid_t nvdimm_btt2_guid;
 static guid_t nvdimm_pfn_guid;
 static guid_t nvdimm_dax_guid;
 
+static uuid_t nvdimm_btt_uuid;
+static uuid_t nvdimm_btt2_uuid;
+static uuid_t nvdimm_pfn_uuid;
+static uuid_t nvdimm_dax_uuid;
+
+static uuid_t cxl_region_uuid;
+static uuid_t cxl_namespace_uuid;
+
 static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
 
 static u32 best_seq(u32 a, u32 b)
@@ -321,7 +329,8 @@ static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
 	return true;
 }
 
-char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
+char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
+		      u32 flags)
 {
 	if (!label_id || !uuid)
 		return NULL;
@@ -351,7 +360,7 @@ static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
 {
 	u64 sum, sum_save;
 
-	if (!namespace_label_has(ndd, checksum))
+	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
 		return true;
 
 	sum_save = nsl_get_checksum(ndd, nd_label);
@@ -366,7 +375,7 @@ static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
 {
 	u64 sum;
 
-	if (!namespace_label_has(ndd, checksum))
+	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
 		return;
 	nsl_set_checksum(ndd, nd_label, 0);
 	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
@@ -400,9 +409,9 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 	struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
 	struct nd_namespace_label *nd_label;
 	struct nd_region *nd_region = NULL;
-	u8 label_uuid[NSLABEL_UUID_LEN];
 	struct nd_label_id label_id;
 	struct resource *res;
+	uuid_t label_uuid;
 	u32 flags;
 
 	nd_label = to_label(ndd, slot);
@@ -410,11 +419,11 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 		if (!slot_valid(ndd, nd_label, slot))
 			continue;
 
-		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
+		nsl_get_uuid(ndd, nd_label, &label_uuid);
 		flags = nsl_get_flags(ndd, nd_label);
 		if (test_bit(NDD_NOBLK, &nvdimm->flags))
 			flags &= ~NSLABEL_FLAG_LOCAL;
-		nd_label_gen_id(&label_id, label_uuid, flags);
+		nd_label_gen_id(&label_id, &label_uuid, flags);
 		res = nvdimm_allocate_dpa(ndd, &label_id,
 					  nsl_get_dpa(ndd, nd_label),
 					  nsl_get_rawsize(ndd, nd_label));
@@ -724,7 +733,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
 		- (unsigned long) to_namespace_index(ndd, 0);
 }
 
-static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
+static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
 {
 	if (guid_equal(guid, &nvdimm_btt_guid))
 		return NVDIMM_CCLASS_BTT;
@@ -740,6 +749,23 @@ static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
 	return NVDIMM_CCLASS_UNKNOWN;
 }
 
+/* CXL labels store UUIDs instead of GUIDs for the same data */
+static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
+{
+	if (uuid_equal(uuid, &nvdimm_btt_uuid))
+		return NVDIMM_CCLASS_BTT;
+	else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
+		return NVDIMM_CCLASS_BTT2;
+	else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
+		return NVDIMM_CCLASS_PFN;
+	else if (uuid_equal(uuid, &nvdimm_dax_uuid))
+		return NVDIMM_CCLASS_DAX;
+	else if (uuid_equal(uuid, &uuid_null))
+		return NVDIMM_CCLASS_NONE;
+
+	return NVDIMM_CCLASS_UNKNOWN;
+}
+
 static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
 					 guid_t *target)
 {
@@ -761,6 +787,28 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
 	return &guid_null;
 }
 
+/* CXL labels store UUIDs instead of GUIDs for the same data */
+static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
+					 uuid_t *target)
+{
+	if (claim_class == NVDIMM_CCLASS_BTT)
+		return &nvdimm_btt_uuid;
+	else if (claim_class == NVDIMM_CCLASS_BTT2)
+		return &nvdimm_btt2_uuid;
+	else if (claim_class == NVDIMM_CCLASS_PFN)
+		return &nvdimm_pfn_uuid;
+	else if (claim_class == NVDIMM_CCLASS_DAX)
+		return &nvdimm_dax_uuid;
+	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
+		/*
+		 * If we're modifying a namespace for which we don't
+		 * know the claim_class, don't touch the existing uuid.
+		 */
+		return target;
+	} else
+		return &uuid_null;
+}
+
 static void reap_victim(struct nd_mapping *nd_mapping,
 			struct nd_label_ent *victim)
 {
@@ -775,18 +823,18 @@ static void reap_victim(struct nd_mapping *nd_mapping,
 static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
 			      struct nd_namespace_label *nd_label, guid_t *guid)
 {
-	if (namespace_label_has(ndd, type_guid))
-		guid_copy(&nd_label->type_guid, guid);
+	if (efi_namespace_label_has(ndd, type_guid))
+		guid_copy(&nd_label->efi.type_guid, guid);
 }
 
 bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
 			    struct nd_namespace_label *nd_label, guid_t *guid)
 {
-	if (!namespace_label_has(ndd, type_guid))
+	if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
 		return true;
-	if (!guid_equal(&nd_label->type_guid, guid)) {
+	if (!guid_equal(&nd_label->efi.type_guid, guid)) {
 		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
-			&nd_label->type_guid);
+			&nd_label->efi.type_guid);
 		return false;
 	}
 	return true;
@@ -796,19 +844,34 @@ static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
 				struct nd_namespace_label *nd_label,
 				enum nvdimm_claim_class claim_class)
 {
-	if (!namespace_label_has(ndd, abstraction_guid))
+	if (ndd->cxl) {
+		uuid_t uuid;
+
+		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
+		export_uuid(nd_label->cxl.abstraction_uuid,
+			    to_abstraction_uuid(claim_class, &uuid));
+		return;
+	}
+
+	if (!efi_namespace_label_has(ndd, abstraction_guid))
 		return;
-	guid_copy(&nd_label->abstraction_guid,
+	guid_copy(&nd_label->efi.abstraction_guid,
 		  to_abstraction_guid(claim_class,
-				      &nd_label->abstraction_guid));
+				      &nd_label->efi.abstraction_guid));
 }
 
 enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
 					    struct nd_namespace_label *nd_label)
 {
-	if (!namespace_label_has(ndd, abstraction_guid))
+	if (ndd->cxl) {
+		uuid_t uuid;
+
+		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
+		return uuid_to_nvdimm_cclass(&uuid);
+	}
+	if (!efi_namespace_label_has(ndd, abstraction_guid))
 		return NVDIMM_CCLASS_NONE;
-	return to_nvdimm_cclass(&nd_label->abstraction_guid);
+	return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
 }
 
 static int __pmem_label_update(struct nd_region *nd_region,
@@ -851,10 +914,11 @@ static int __pmem_label_update(struct nd_region *nd_region,
 
 	nd_label = to_label(ndd, slot);
 	memset(nd_label, 0, sizeof_namespace_label(ndd));
-	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
+	nsl_set_uuid(ndd, nd_label, nspm->uuid);
 	nsl_set_name(ndd, nd_label, nspm->alt_name);
 	nsl_set_flags(ndd, nd_label, flags);
 	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
+	nsl_set_nrange(ndd, nd_label, 1);
 	nsl_set_position(ndd, nd_label, pos);
 	nsl_set_isetcookie(ndd, nd_label, cookie);
 	nsl_set_rawsize(ndd, nd_label, resource_size(res));
@@ -878,9 +942,8 @@ static int __pmem_label_update(struct nd_region *nd_region,
 	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
 		if (!label_ent->label)
 			continue;
-		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
-				|| memcmp(nspm->uuid, label_ent->label->uuid,
-					NSLABEL_UUID_LEN) == 0)
+		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
+		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
 			reap_victim(nd_mapping, label_ent);
 	}
 
@@ -941,7 +1004,7 @@ static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
 				   struct nd_namespace_label *nd_label,
 				   u64 isetcookie)
 {
-	if (namespace_label_has(ndd, type_guid)) {
+	if (efi_namespace_label_has(ndd, type_guid)) {
 		nsl_set_isetcookie(ndd, nd_label, isetcookie);
 		return;
 	}
@@ -952,7 +1015,7 @@ bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
 				 struct nd_namespace_label *nd_label,
 				 u64 isetcookie)
 {
-	if (!namespace_label_has(ndd, type_guid))
+	if (!efi_namespace_label_has(ndd, type_guid))
 		return true;
 
 	if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
@@ -968,7 +1031,7 @@ static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
 			       struct nd_namespace_label *nd_label, int nlabel,
 			       bool first)
 {
-	if (!namespace_label_has(ndd, type_guid)) {
+	if (!efi_namespace_label_has(ndd, type_guid)) {
 		nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
 		return;
 	}
@@ -979,7 +1042,7 @@ static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
 				 struct nd_namespace_label *nd_label,
 				 bool first)
 {
-	if (!namespace_label_has(ndd, type_guid)) {
+	if (!efi_namespace_label_has(ndd, type_guid)) {
 		nsl_set_position(ndd, nd_label, 0);
 		return;
 	}
@@ -1005,7 +1068,6 @@ static int __blk_label_update(struct nd_region *nd_region,
 	unsigned long *free, *victim_map = NULL;
 	struct resource *res, **old_res_list;
 	struct nd_label_id label_id;
-	u8 uuid[NSLABEL_UUID_LEN];
 	int min_dpa_idx = 0;
 	LIST_HEAD(list);
 	u32 nslot, slot;
@@ -1043,8 +1105,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 	/* mark unused labels for garbage collection */
 	for_each_clear_bit_le(slot, free, nslot) {
 		nd_label = to_label(ndd, slot);
-		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
			continue;
 		res = to_resource(ndd, nd_label);
 		if (res && is_old_resource(res, old_res_list,
@@ -1113,7 +1174,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 
 		nd_label = to_label(ndd, slot);
 		memset(nd_label, 0, sizeof_namespace_label(ndd));
-		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
+		nsl_set_uuid(ndd, nd_label, nsblk->uuid);
 		nsl_set_name(ndd, nd_label, nsblk->alt_name);
 		nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);
 
@@ -1161,8 +1222,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 		if (!nd_label)
 			continue;
 		nlabel++;
-		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
 			continue;
 		nlabel--;
 		list_move(&label_ent->list, &list);
@@ -1192,8 +1252,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 	}
 	for_each_clear_bit_le(slot, free, nslot) {
 		nd_label = to_label(ndd, slot);
-		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
 			continue;
 		res = to_resource(ndd, nd_label);
 		res->flags &= ~DPA_RESOURCE_ADJUSTED;
@@ -1273,12 +1332,11 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
 	return max(num_labels, old_num_labels);
 }
 
-static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
+static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
 {
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 	struct nd_label_ent *label_ent, *e;
 	struct nd_namespace_index *nsindex;
-	u8 label_uuid[NSLABEL_UUID_LEN];
 	unsigned long *free;
 	LIST_HEAD(list);
 	u32 nslot, slot;
@@ -1298,8 +1356,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
 		if (!nd_label)
 			continue;
 		active++;
-		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, uuid))
 			continue;
 		active--;
 		slot = to_slot(ndd, nd_label);
@@ -1395,5 +1452,13 @@ int __init nd_label_init(void)
 	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
 	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
 
+	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
+	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
+	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
+	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));
+
+	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
+	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));
+
 	return 0;
 }
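
Aside: for the callers converted above, nd_label_gen_id() formats the dpa allocation id from the uuid; a hedged reminder of its output:

	struct nd_label_id label_id;

	nd_label_gen_id(&label_id, &uuid, flags);
	/* label_id.id reads "pmem-<uuid>", or "blk-<uuid>" with NSLABEL_FLAG_LOCAL */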
@@ -34,6 +34,7 @@ enum {
  * struct nd_namespace_index - label set superblock
  * @sig: NAMESPACE_INDEX\0
  * @flags: placeholder
+ * @labelsize: log2 size (v1 labels 128 bytes v2 labels 256 bytes)
  * @seq: sequence number for this index
  * @myoff: offset of this index in label area
  * @mysize: size of this index struct
@@ -43,7 +44,7 @@ enum {
  * @major: label area major version
  * @minor: label area minor version
  * @checksum: fletcher64 of all fields
- * @free[0]: bitmap, nlabel bits
+ * @free: bitmap, nlabel bits
  *
 * The size of free[] is rounded up so the total struct size is a
 * multiple of NSINDEX_ALIGN bytes. Any bits this allocates beyond
@@ -66,7 +67,39 @@ struct nd_namespace_index {
 };
 
 /**
- * struct nd_namespace_label - namespace superblock
+ * struct cxl_region_label - CXL 2.0 Table 211
+ * @type: uuid identifying this label format (region)
+ * @uuid: uuid for the region this label describes
+ * @flags: NSLABEL_FLAG_UPDATING (all other flags reserved)
+ * @nlabel: 1 per interleave-way in the region
+ * @position: this label's position in the set
+ * @dpa: start address in device-local capacity for this label
+ * @rawsize: size of this label's contribution to region
+ * @hpa: mandatory system physical address to map this region
+ * @slot: slot id of this label in label area
+ * @ig: interleave granularity (1 << @ig) * 256 bytes
+ * @align: alignment in SZ_256M blocks
+ * @reserved: reserved
+ * @checksum: fletcher64 sum of this label
+ */
+struct cxl_region_label {
+	u8 type[NSLABEL_UUID_LEN];
+	u8 uuid[NSLABEL_UUID_LEN];
+	__le32 flags;
+	__le16 nlabel;
+	__le16 position;
+	__le64 dpa;
+	__le64 rawsize;
+	__le64 hpa;
+	__le32 slot;
+	__le32 ig;
+	__le32 align;
+	u8 reserved[0xac];
+	__le64 checksum;
+};
+
+/**
+ * struct nvdimm_efi_label - namespace superblock
  * @uuid: UUID per RFC 4122
 * @name: optional name (NULL-terminated)
 * @flags: see NSLABEL_FLAG_*
@@ -77,9 +110,14 @@ struct nd_namespace_index {
 * @dpa: DPA of NVM range on this DIMM
 * @rawsize: size of namespace
 * @slot: slot of this label in label area
- * @unused: must be zero
+ * @align: physical address alignment of the namespace
+ * @reserved: reserved
+ * @type_guid: copy of struct acpi_nfit_system_address.range_guid
+ * @abstraction_guid: personality id (btt, btt2, fsdax, devdax....)
+ * @reserved2: reserved
 * @checksum: fletcher64 sum of this object
 */
-struct nd_namespace_label {
+struct nvdimm_efi_label {
 	u8 uuid[NSLABEL_UUID_LEN];
 	u8 name[NSLABEL_NAME_LEN];
 	__le32 flags;
@@ -92,7 +130,7 @@ struct nd_namespace_label {
 	__le32 slot;
 	/*
 	 * Accessing fields past this point should be gated by a
-	 * namespace_label_has() check.
+	 * efi_namespace_label_has() check.
 	 */
 	u8 align;
 	u8 reserved[3];
@@ -102,11 +140,57 @@ struct nd_namespace_label {
 	__le64 checksum;
 };
 
+/**
+ * struct nvdimm_cxl_label - CXL 2.0 Table 212
+ * @type: uuid identifying this label format (namespace)
+ * @uuid: uuid for the namespace this label describes
+ * @name: friendly name for the namespace
+ * @flags: NSLABEL_FLAG_UPDATING (all other flags reserved)
+ * @nrange: discontiguous namespace support
+ * @position: this label's position in the set
+ * @dpa: start address in device-local capacity for this label
+ * @rawsize: size of this label's contribution to namespace
+ * @slot: slot id of this label in label area
+ * @align: alignment in SZ_256M blocks
+ * @region_uuid: host interleave set identifier
+ * @abstraction_uuid: personality driver for this namespace
+ * @lbasize: address geometry for disk-like personalities
+ * @reserved: reserved
+ * @checksum: fletcher64 sum of this label
+ */
+struct nvdimm_cxl_label {
+	u8 type[NSLABEL_UUID_LEN];
+	u8 uuid[NSLABEL_UUID_LEN];
+	u8 name[NSLABEL_NAME_LEN];
+	__le32 flags;
+	__le16 nrange;
+	__le16 position;
+	__le64 dpa;
+	__le64 rawsize;
+	__le32 slot;
+	__le32 align;
+	u8 region_uuid[16];
+	u8 abstraction_uuid[16];
+	__le16 lbasize;
+	u8 reserved[0x56];
+	__le64 checksum;
+};
+
+struct nd_namespace_label {
+	union {
+		struct nvdimm_cxl_label cxl;
+		struct nvdimm_efi_label efi;
+	};
+};
+
 #define NVDIMM_BTT_GUID "8aed63a2-29a2-4c66-8b12-f05d15d3922a"
 #define NVDIMM_BTT2_GUID "18633bfc-1735-4217-8ac9-17239282d3f8"
 #define NVDIMM_PFN_GUID "266400ba-fb9f-4677-bcb0-968f11d0d225"
 #define NVDIMM_DAX_GUID "97a86d9c-3cdd-4eda-986f-5068b4f80088"
 
+#define CXL_REGION_UUID "529d7c61-da07-47c4-a93f-ecdf2c06f444"
+#define CXL_NAMESPACE_UUID "68bb2c0a-5a77-4937-9f85-3caf41a0f93c"
+
 /**
 * struct nd_label_id - identifier string for dpa allocation
 * @id: "{blk|pmem}-<namespace uuid>"
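
Aside: a hedged sketch of decoding the geometry fields documented above; `label` is an illustrative pointer to an on-media struct cxl_region_label:

	u64 granularity = 256ULL << le32_to_cpu(label->ig);	/* (1 << ig) * 256 */
	u64 alignment = (u64)le32_to_cpu(label->align) * SZ_256M;
	u16 ways = le16_to_cpu(label->nlabel);	/* one label per interleave-way */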
@@ -51,7 +51,7 @@ static bool is_namespace_io(const struct device *dev);
 
 static int is_uuid_busy(struct device *dev, void *data)
 {
-	u8 *uuid1 = data, *uuid2 = NULL;
+	uuid_t *uuid1 = data, *uuid2 = NULL;
 
 	if (is_namespace_pmem(dev)) {
 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
@@ -71,7 +71,7 @@ static int is_uuid_busy(struct device *dev, void *data)
 		uuid2 = nd_pfn->uuid;
 	}
 
-	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
+	if (uuid2 && uuid_equal(uuid1, uuid2))
 		return -EBUSY;
 
 	return 0;
@@ -89,7 +89,7 @@ static int is_namespace_uuid_busy(struct device *dev, void *data)
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
-bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
+bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
 {
 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
 
@@ -192,12 +192,10 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 }
 EXPORT_SYMBOL(nvdimm_namespace_disk_name);
 
-const u8 *nd_dev_to_uuid(struct device *dev)
+const uuid_t *nd_dev_to_uuid(struct device *dev)
 {
-	static const u8 null_uuid[16];
-
 	if (!dev)
-		return null_uuid;
+		return &uuid_null;
 
 	if (is_namespace_pmem(dev)) {
 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
@@ -208,7 +206,7 @@ const u8 *nd_dev_to_uuid(struct device *dev)
 
 		return nsblk->uuid;
 	} else
-		return null_uuid;
+		return &uuid_null;
 }
 EXPORT_SYMBOL(nd_dev_to_uuid);
 
@@ -938,7 +936,8 @@ static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
 	res->end = res->start + size - 1;
 }
 
-static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
+static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
+			 const char *where)
 {
 	if (!uuid) {
 		dev_dbg(dev, "%s: uuid not set\n", where);
@@ -957,7 +956,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 	struct nd_label_id label_id;
 	u32 flags = 0, remainder;
 	int rc, i, id = -1;
-	u8 *uuid = NULL;
+	uuid_t *uuid = NULL;
 
 	if (dev->driver || ndns->claim)
 		return -EBUSY;
@@ -1050,7 +1049,7 @@ static ssize_t size_store(struct device *dev,
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
 	unsigned long long val;
-	u8 **uuid = NULL;
+	uuid_t **uuid = NULL;
 	int rc;
 
 	rc = kstrtoull(buf, 0, &val);
@@ -1147,7 +1146,7 @@ static ssize_t size_show(struct device *dev,
 }
 static DEVICE_ATTR(size, 0444, size_show, size_store);
 
-static u8 *namespace_to_uuid(struct device *dev)
+static uuid_t *namespace_to_uuid(struct device *dev)
 {
 	if (is_namespace_pmem(dev)) {
 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
@@ -1161,10 +1160,10 @@ static u8 *namespace_to_uuid(struct device *dev)
 	return ERR_PTR(-ENXIO);
 }
 
-static ssize_t uuid_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
 {
-	u8 *uuid = namespace_to_uuid(dev);
+	uuid_t *uuid = namespace_to_uuid(dev);
 
 	if (IS_ERR(uuid))
 		return PTR_ERR(uuid);
@@ -1181,7 +1180,8 @@ static ssize_t uuid_show(struct device *dev,
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
 static int namespace_update_uuid(struct nd_region *nd_region,
-		struct device *dev, u8 *new_uuid, u8 **old_uuid)
+				 struct device *dev, uuid_t *new_uuid,
+				 uuid_t **old_uuid)
 {
 	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
 	struct nd_label_id old_label_id;
@@ -1231,10 +1231,12 @@ static int namespace_update_uuid(struct nd_region *nd_region,
 		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
 			struct nd_namespace_label *nd_label = label_ent->label;
 			struct nd_label_id label_id;
+			uuid_t uuid;
 
 			if (!nd_label)
 				continue;
-			nd_label_gen_id(&label_id, nd_label->uuid,
+			nsl_get_uuid(ndd, nd_label, &uuid);
+			nd_label_gen_id(&label_id, &uuid,
 					nsl_get_flags(ndd, nd_label));
 			if (strcmp(old_label_id.id, label_id.id) == 0)
 				set_bit(ND_LABEL_REAP, &label_ent->flags);
@@ -1251,9 +1253,9 @@ static ssize_t uuid_store(struct device *dev,
 	struct device_attribute *attr, const char *buf, size_t len)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
-	u8 *uuid = NULL;
+	uuid_t *uuid = NULL;
+	uuid_t **ns_uuid;
 	ssize_t rc = 0;
-	u8 **ns_uuid;
 
 	if (is_namespace_pmem(dev)) {
 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
@@ -1378,8 +1380,8 @@ static ssize_t dpa_extents_show(struct device *dev,
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
 	struct nd_label_id label_id;
+	uuid_t *uuid = NULL;
 	int count = 0, i;
-	u8 *uuid = NULL;
 	u32 flags = 0;
 
 	nvdimm_bus_lock(dev);
@@ -1831,8 +1833,8 @@ static struct device **create_namespace_io(struct nd_region *nd_region)
 	return devs;
 }
 
-static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
-		u64 cookie, u16 pos)
+static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
+			    u64 cookie, u16 pos)
 {
 	struct nd_namespace_label *found = NULL;
 	int i;
@@ -1846,17 +1848,16 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
 
 		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
 			struct nd_namespace_label *nd_label = label_ent->label;
-			u16 position, nlabel;
+			u16 position;
 
 			if (!nd_label)
 				continue;
 			position = nsl_get_position(ndd, nd_label);
-			nlabel = nsl_get_nlabel(ndd, nd_label);
 
 			if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
 				continue;
 
-			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
+			if (!nsl_uuid_equal(ndd, nd_label, uuid))
 				continue;
 
 			if (!nsl_validate_type_guid(ndd, nd_label,
@@ -1868,7 +1869,7 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
 				return false;
 			}
 			found_uuid = true;
-			if (nlabel != nd_region->ndr_mappings)
+			if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
 				continue;
 			if (position != pos)
 				continue;
@@ -1881,7 +1882,7 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
 	return found != NULL;
 }
 
-static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
+static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
 {
 	int i;
 
@@ -1900,7 +1901,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
 		nd_label = label_ent->label;
 		if (!nd_label)
 			continue;
-		if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
+		if (nsl_uuid_equal(ndd, nd_label, pmem_id))
 			break;
 		nd_label = NULL;
 	}
@@ -1923,7 +1924,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
 			/* pass */;
 		else {
 			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
-					dev_name(ndd->dev), nd_label->uuid);
+				dev_name(ndd->dev),
+				nsl_uuid_raw(ndd, nd_label));
 			return -EINVAL;
 		}
 
@@ -1953,6 +1955,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 	resource_size_t size = 0;
 	struct resource *res;
 	struct device *dev;
+	uuid_t uuid;
 	int rc = 0;
 	u16 i;
 
@@ -1963,12 +1966,12 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 
 	if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
 		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
-				nd_label->uuid);
+			nsl_uuid_raw(ndd, nd_label));
 		if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
 			return ERR_PTR(-EAGAIN);
 
 		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
-				nd_label->uuid);
+			nsl_uuid_raw(ndd, nd_label));
 	}
 
 	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
@@ -1984,9 +1987,12 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 	res->flags = IORESOURCE_MEM;
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
-		if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
+		uuid_t uuid;
+
+		nsl_get_uuid(ndd, nd_label, &uuid);
+		if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
 			continue;
-		if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
+		if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
 			continue;
 		break;
 	}
@@ -2000,7 +2006,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 		 * find a dimm with two instances of the same uuid.
 		 */
 		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
-				nvdimm_name(nvdimm), nd_label->uuid);
+			nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
 		rc = -EINVAL;
 		goto err;
 	}
@@ -2013,7 +2019,8 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
 	 * succeeded).
 	 */
-	rc = select_pmem_id(nd_region, nd_label->uuid);
+	nsl_get_uuid(ndd, nd_label, &uuid);
+	rc = select_pmem_id(nd_region, &uuid);
 	if (rc)
 		goto err;
 
@@ -2039,8 +2046,8 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 		WARN_ON(nspm->alt_name || nspm->uuid);
 		nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
 					 NSLABEL_NAME_LEN, GFP_KERNEL);
-		nspm->uuid = kmemdup((void __force *) label0->uuid,
-				NSLABEL_UUID_LEN, GFP_KERNEL);
+		nsl_get_uuid(ndd, label0, &uuid);
+		nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
 		nspm->lbasize = nsl_get_lbasize(ndd, label0);
 		nspm->nsio.common.claim_class =
 			nsl_get_claim_class(ndd, label0);
@@ -2217,15 +2224,15 @@ static int add_namespace_resource(struct nd_region *nd_region,
 	int i;
 
 	for (i = 0; i < count; i++) {
-		u8 *uuid = namespace_to_uuid(devs[i]);
+		uuid_t *uuid = namespace_to_uuid(devs[i]);
 		struct resource *res;
 
-		if (IS_ERR_OR_NULL(uuid)) {
+		if (IS_ERR(uuid)) {
 			WARN_ON(1);
 			continue;
 		}
 
-		if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, uuid))
 			continue;
 		if (is_namespace_blk(devs[i])) {
 			res = nsblk_add_resource(nd_region, ndd,
@@ -2236,8 +2243,8 @@ static int add_namespace_resource(struct nd_region *nd_region,
 			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
 		} else {
 			dev_err(&nd_region->dev,
-					"error: conflicting extents for uuid: %pUb\n",
-					nd_label->uuid);
+				"error: conflicting extents for uuid: %pUb\n",
+				uuid);
 			return -ENXIO;
 		}
 		break;
@@ -2257,6 +2264,7 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
 	char name[NSLABEL_NAME_LEN];
 	struct device *dev = NULL;
 	struct resource *res;
+	uuid_t uuid;
 
 	if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
 		return ERR_PTR(-EAGAIN);
@@ -2271,7 +2279,8 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
 	dev->parent = &nd_region->dev;
 	nsblk->id = -1;
 	nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
-	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL);
+	nsl_get_uuid(ndd, nd_label, &uuid);
+	nsblk->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
 	nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
 	if (!nsblk->uuid)
 		goto blk_err;
@@ -126,8 +126,9 @@ void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
 void nd_synchronize(void);
 void __nd_device_register(struct device *dev);
 struct nd_label_id;
-char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
-bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
+char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
+		      u32 flags);
+bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid);
 struct nd_region;
 struct nvdimm_drvdata;
 struct nd_mapping;
@@ -30,6 +30,7 @@ struct nvdimm_drvdata {
 	int nslabel_size;
 	struct nd_cmd_get_config_size nsarea;
 	void *data;
+	bool cxl;
 	int ns_current, ns_next;
 	struct resource dpa;
 	struct kref kref;
@@ -38,13 +39,17 @@ struct nvdimm_drvdata {
 static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
 				     struct nd_namespace_label *nd_label)
 {
-	return nd_label->name;
+	if (ndd->cxl)
+		return nd_label->cxl.name;
+	return nd_label->efi.name;
 }

 static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
 			       struct nd_namespace_label *nd_label, u8 *name)
 {
-	return memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+	if (ndd->cxl)
+		return memcpy(name, nd_label->cxl.name, NSLABEL_NAME_LEN);
+	return memcpy(name, nd_label->efi.name, NSLABEL_NAME_LEN);
 }

 static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
@@ -52,129 +57,242 @@ static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
 {
 	if (!name)
 		return NULL;
-	return memcpy(nd_label->name, name, NSLABEL_NAME_LEN);
+	if (ndd->cxl)
+		return memcpy(nd_label->cxl.name, name, NSLABEL_NAME_LEN);
+	return memcpy(nd_label->efi.name, name, NSLABEL_NAME_LEN);
 }

 static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
 			       struct nd_namespace_label *nd_label)
 {
-	return __le32_to_cpu(nd_label->slot);
+	if (ndd->cxl)
+		return __le32_to_cpu(nd_label->cxl.slot);
+	return __le32_to_cpu(nd_label->efi.slot);
 }

 static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
 				struct nd_namespace_label *nd_label, u32 slot)
 {
-	nd_label->slot = __cpu_to_le32(slot);
+	if (ndd->cxl)
+		nd_label->cxl.slot = __cpu_to_le32(slot);
+	else
+		nd_label->efi.slot = __cpu_to_le32(slot);
 }

 static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
 				   struct nd_namespace_label *nd_label)
 {
-	return __le64_to_cpu(nd_label->checksum);
+	if (ndd->cxl)
+		return __le64_to_cpu(nd_label->cxl.checksum);
+	return __le64_to_cpu(nd_label->efi.checksum);
 }

 static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
 				    struct nd_namespace_label *nd_label,
 				    u64 checksum)
 {
-	nd_label->checksum = __cpu_to_le64(checksum);
+	if (ndd->cxl)
+		nd_label->cxl.checksum = __cpu_to_le64(checksum);
+	else
+		nd_label->efi.checksum = __cpu_to_le64(checksum);
 }

 static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
 				struct nd_namespace_label *nd_label)
 {
-	return __le32_to_cpu(nd_label->flags);
+	if (ndd->cxl)
+		return __le32_to_cpu(nd_label->cxl.flags);
+	return __le32_to_cpu(nd_label->efi.flags);
 }

 static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
 				 struct nd_namespace_label *nd_label, u32 flags)
 {
-	nd_label->flags = __cpu_to_le32(flags);
+	if (ndd->cxl)
+		nd_label->cxl.flags = __cpu_to_le32(flags);
+	else
+		nd_label->efi.flags = __cpu_to_le32(flags);
 }

 static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
 			      struct nd_namespace_label *nd_label)
 {
-	return __le64_to_cpu(nd_label->dpa);
+	if (ndd->cxl)
+		return __le64_to_cpu(nd_label->cxl.dpa);
+	return __le64_to_cpu(nd_label->efi.dpa);
 }

 static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
 			       struct nd_namespace_label *nd_label, u64 dpa)
 {
-	nd_label->dpa = __cpu_to_le64(dpa);
+	if (ndd->cxl)
+		nd_label->cxl.dpa = __cpu_to_le64(dpa);
+	else
+		nd_label->efi.dpa = __cpu_to_le64(dpa);
 }

 static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
 				  struct nd_namespace_label *nd_label)
 {
-	return __le64_to_cpu(nd_label->rawsize);
+	if (ndd->cxl)
+		return __le64_to_cpu(nd_label->cxl.rawsize);
+	return __le64_to_cpu(nd_label->efi.rawsize);
 }

 static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
 				   struct nd_namespace_label *nd_label,
 				   u64 rawsize)
 {
-	nd_label->rawsize = __cpu_to_le64(rawsize);
+	if (ndd->cxl)
+		nd_label->cxl.rawsize = __cpu_to_le64(rawsize);
+	else
+		nd_label->efi.rawsize = __cpu_to_le64(rawsize);
 }

 static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
 				     struct nd_namespace_label *nd_label)
 {
-	return __le64_to_cpu(nd_label->isetcookie);
+	/* WARN future refactor attempts that break this assumption */
+	if (dev_WARN_ONCE(ndd->dev, ndd->cxl,
+			  "CXL labels do not use the isetcookie concept\n"))
+		return 0;
+	return __le64_to_cpu(nd_label->efi.isetcookie);
 }

 static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
 				      struct nd_namespace_label *nd_label,
 				      u64 isetcookie)
 {
-	nd_label->isetcookie = __cpu_to_le64(isetcookie);
+	if (!ndd->cxl)
+		nd_label->efi.isetcookie = __cpu_to_le64(isetcookie);
 }

 static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
 					   struct nd_namespace_label *nd_label,
 					   u64 cookie)
 {
-	return cookie == __le64_to_cpu(nd_label->isetcookie);
+	/*
+	 * Let the EFI and CXL validation comingle, where fields that
+	 * don't matter to CXL always validate.
+	 */
+	if (ndd->cxl)
+		return true;
+	return cookie == __le64_to_cpu(nd_label->efi.isetcookie);
 }

 static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
 				   struct nd_namespace_label *nd_label)
 {
-	return __le16_to_cpu(nd_label->position);
+	if (ndd->cxl)
+		return __le16_to_cpu(nd_label->cxl.position);
+	return __le16_to_cpu(nd_label->efi.position);
 }

 static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
 				    struct nd_namespace_label *nd_label,
 				    u16 position)
 {
-	nd_label->position = __cpu_to_le16(position);
+	if (ndd->cxl)
+		nd_label->cxl.position = __cpu_to_le16(position);
+	else
+		nd_label->efi.position = __cpu_to_le16(position);
 }

 static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
 				 struct nd_namespace_label *nd_label)
 {
-	return __le16_to_cpu(nd_label->nlabel);
+	if (ndd->cxl)
+		return 0;
+	return __le16_to_cpu(nd_label->efi.nlabel);
 }

 static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
 				  struct nd_namespace_label *nd_label,
 				  u16 nlabel)
 {
-	nd_label->nlabel = __cpu_to_le16(nlabel);
+	if (!ndd->cxl)
+		nd_label->efi.nlabel = __cpu_to_le16(nlabel);
 }

+static inline u16 nsl_get_nrange(struct nvdimm_drvdata *ndd,
+				 struct nd_namespace_label *nd_label)
+{
+	if (ndd->cxl)
+		return __le16_to_cpu(nd_label->cxl.nrange);
+	return 1;
+}
+
+static inline void nsl_set_nrange(struct nvdimm_drvdata *ndd,
+				  struct nd_namespace_label *nd_label,
+				  u16 nrange)
+{
+	if (ndd->cxl)
+		nd_label->cxl.nrange = __cpu_to_le16(nrange);
+}
+
 static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
 				  struct nd_namespace_label *nd_label)
 {
-	return __le64_to_cpu(nd_label->lbasize);
+	/*
+	 * Yes, for some reason the EFI labels convey a massive 64-bit
+	 * lbasize, that got fixed for CXL.
+	 */
+	if (ndd->cxl)
+		return __le16_to_cpu(nd_label->cxl.lbasize);
+	return __le64_to_cpu(nd_label->efi.lbasize);
 }

 static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
 				   struct nd_namespace_label *nd_label,
 				   u64 lbasize)
 {
-	nd_label->lbasize = __cpu_to_le64(lbasize);
+	if (ndd->cxl)
+		nd_label->cxl.lbasize = __cpu_to_le16(lbasize);
+	else
+		nd_label->efi.lbasize = __cpu_to_le64(lbasize);
 }

+static inline const uuid_t *nsl_get_uuid(struct nvdimm_drvdata *ndd,
+					 struct nd_namespace_label *nd_label,
+					 uuid_t *uuid)
+{
+	if (ndd->cxl)
+		import_uuid(uuid, nd_label->cxl.uuid);
+	else
+		import_uuid(uuid, nd_label->efi.uuid);
+	return uuid;
+}
+
+static inline const uuid_t *nsl_set_uuid(struct nvdimm_drvdata *ndd,
+					 struct nd_namespace_label *nd_label,
+					 const uuid_t *uuid)
+{
+	if (ndd->cxl)
+		export_uuid(nd_label->cxl.uuid, uuid);
+	else
+		export_uuid(nd_label->efi.uuid, uuid);
+	return uuid;
+}
+
+static inline bool nsl_uuid_equal(struct nvdimm_drvdata *ndd,
+				  struct nd_namespace_label *nd_label,
+				  const uuid_t *uuid)
+{
+	uuid_t tmp;
+
+	if (ndd->cxl)
+		import_uuid(&tmp, nd_label->cxl.uuid);
+	else
+		import_uuid(&tmp, nd_label->efi.uuid);
+	return uuid_equal(&tmp, uuid);
+}
+
+static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd,
+				     struct nd_namespace_label *nd_label)
+{
+	if (ndd->cxl)
+		return nd_label->cxl.uuid;
+	return nd_label->efi.uuid;
+}
+
 bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
@@ -233,8 +351,8 @@ static inline struct nd_namespace_index *to_next_namespace_index(

 unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

-#define namespace_label_has(ndd, field) \
-	(offsetof(struct nd_namespace_label, field) \
+#define efi_namespace_label_has(ndd, field) \
+	(!ndd->cxl && offsetof(struct nvdimm_efi_label, field) \
 		< sizeof_namespace_label(ndd))

 #define nd_dbg_dpa(r, d, res, fmt, arg...) \
@@ -310,6 +428,15 @@ struct nd_region {
 	struct nd_mapping mapping[];
 };

+static inline bool nsl_validate_nlabel(struct nd_region *nd_region,
+				       struct nvdimm_drvdata *ndd,
+				       struct nd_namespace_label *nd_label)
+{
+	if (ndd->cxl)
+		return true;
+	return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings;
+}
+
 struct nd_blk_region {
 	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
 	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
@@ -335,7 +462,7 @@ struct nd_btt {
 	struct btt *btt;
 	unsigned long lbasize;
 	u64 size;
-	u8 *uuid;
+	uuid_t *uuid;
 	int id;
 	int initial_offset;
 	u16 version_major;
@@ -350,7 +477,7 @@ enum nd_pfn_mode {

 struct nd_pfn {
 	int id;
-	u8 *uuid;
+	uuid_t *uuid;
 	struct device dev;
 	unsigned long align;
 	unsigned long npfns;
@@ -378,7 +505,7 @@ void wait_nvdimm_bus_probe_idle(struct device *dev);
 void nd_device_register(struct device *dev);
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
 void nd_device_notify(struct device *dev, enum nvdimm_event event);
-int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
+int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
 		  size_t len);
 ssize_t nd_size_select_show(unsigned long current_size,
 		const unsigned long *supported, char *buf);
@@ -561,6 +688,6 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
 	return false;
 }
 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
-const u8 *nd_dev_to_uuid(struct device *dev);
+const uuid_t *nd_dev_to_uuid(struct device *dev);
 bool pmem_should_map_pages(struct device *dev);
 #endif /* __ND_H__ */
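
For illustration, a caller-side sketch of the accessor pattern above: fetch the label uuid through nsl_get_uuid() and compare with uuid_equal(), mirroring what create_namespace_pmem() does in this series. The function below is hypothetical, not part of the patch:

	static bool example_label_matches(struct nvdimm_drvdata *ndd,
					  struct nd_namespace_label *nd_label,
					  const uuid_t *expect)
	{
		uuid_t uuid;

		/* format-aware copy out of the EFI or CXL label layout */
		nsl_get_uuid(ndd, nd_label, &uuid);
		return uuid_equal(&uuid, expect);
	}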
@@ -452,7 +452,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 	unsigned long align, start_pad;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
+	const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

 	if (!pfn_sb || !ndns)
 		return -ENODEV;
@@ -732,6 +732,38 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
 }
 EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

+/**
+ * pci_find_dvsec_capability - Find DVSEC for vendor
+ * @dev: PCI device to query
+ * @vendor: Vendor ID to match for the DVSEC
+ * @dvsec: Designated Vendor-specific capability ID
+ *
+ * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
+ * offset in config space; otherwise return 0.
+ */
+u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
+{
+	int pos;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
+	if (!pos)
+		return 0;
+
+	while (pos) {
+		u16 v, id;
+
+		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
+		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
+		if (vendor == v && dvsec == id)
+			return pos;
+
+		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
+
 /**
  * pci_find_parent_resource - return resource region of parent bus of given
  * region
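
A quick usage sketch for the new helper: a driver locates its DVSEC by vendor and capability ID, then reads registers relative to the returned offset. The IDs below (vendor 0x1e98, DVSEC ID 0 for the PCIe device DVSEC) come from the CXL 2.0 specification and are used for illustration; the function itself is hypothetical:

	static int example_find_cxl_dvsec(struct pci_dev *pdev)
	{
		/* CXL 2.0: vendor 0x1e98, id 0 == PCIe device DVSEC */
		u16 off = pci_find_dvsec_capability(pdev, 0x1e98, 0);

		if (!off)
			return -ENXIO;	/* not a CXL device */
		/* ... read DVSEC registers at pdev config offset 'off' ... */
		return 0;
	}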
@@ -88,7 +88,7 @@ struct nd_namespace_pmem {
 	struct nd_namespace_io nsio;
 	unsigned long lbasize;
 	char *alt_name;
-	u8 *uuid;
+	uuid_t *uuid;
 	int id;
 };

@@ -105,7 +105,7 @@ struct nd_namespace_pmem {
 struct nd_namespace_blk {
 	struct nd_namespace_common common;
 	char *alt_name;
-	u8 *uuid;
+	uuid_t *uuid;
 	int id;
 	unsigned long lbasize;
 	resource_size_t size;
@@ -1132,6 +1132,7 @@ u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
+u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);

 u64 pci_get_dsn(struct pci_dev *dev);
tools/testing/cxl/Kbuild (new file, 38 lines)
@@ -0,0 +1,38 @@
# SPDX-License-Identifier: GPL-2.0
ldflags-y += --wrap=is_acpi_device_node
ldflags-y += --wrap=acpi_get_table
ldflags-y += --wrap=acpi_put_table
ldflags-y += --wrap=acpi_evaluate_integer
ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=pci_walk_bus
ldflags-y += --wrap=nvdimm_bus_register

DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
CXL_CORE_SRC := $(DRIVERS)/cxl/core
ccflags-y := -I$(srctree)/drivers/cxl/
ccflags-y += -D__mock=__weak

obj-m += cxl_acpi.o

cxl_acpi-y := $(CXL_SRC)/acpi.o
cxl_acpi-y += mock_acpi.o
cxl_acpi-y += config_check.o

obj-m += cxl_pmem.o

cxl_pmem-y := $(CXL_SRC)/pmem.o
cxl_pmem-y += config_check.o

obj-m += cxl_core.o

cxl_core-y := $(CXL_CORE_SRC)/bus.o
cxl_core-y += $(CXL_CORE_SRC)/pmem.o
cxl_core-y += $(CXL_CORE_SRC)/regs.o
cxl_core-y += $(CXL_CORE_SRC)/memdev.o
cxl_core-y += $(CXL_CORE_SRC)/mbox.o
cxl_core-y += config_check.o

cxl_core-y += mock_pmem.o

obj-m += test/
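
The --wrap flags above are what make the mocking work: for each listed symbol, ld routes every call site to __wrap_<sym> and keeps the original reachable as __real_<sym>. A minimal sketch of that linker convention (illustrative only; the actual wrappers in test/mock.c below call the real kernel symbols directly rather than via __real_):

	/* built with: ld --wrap=probe_hw */
	int __real_probe_hw(void);

	int __wrap_probe_hw(void)
	{
		/* interpose here, then optionally fall through */
		return __real_probe_hw();
	}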
tools/testing/cxl/config_check.c (new file, 13 lines)
@@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>

void check(void)
{
	/*
	 * These kconfig symbols must be set to "m" for cxl_test to load
	 * and operate.
	 */
	BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_BUS));
	BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_ACPI));
	BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM));
}
tools/testing/cxl/mock_acpi.c (new file, 109 lines)
@@ -0,0 +1,109 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <cxl.h>
#include "test/mock.h"

struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev)
{
	int index;
	struct acpi_device *adev, *found = NULL;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops && ops->is_mock_bridge(dev)) {
		found = ACPI_COMPANION(dev);
		goto out;
	}

	if (dev->bus == &platform_bus_type)
		goto out;

	adev = to_acpi_device(dev);
	if (!acpi_pci_find_root(adev->handle))
		goto out;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0) {
		found = adev;
		dev_dbg(host, "found host bridge %s\n", dev_name(&adev->dev));
	}
out:
	put_cxl_mock_ops(index);
	return found;
}

static int match_add_root_port(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct pci_bus *root_bus = ctx->root;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct device *dev = ctx->dev;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != root_bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != PCI_EXP_TYPE_ROOT_PORT)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap) != PCIBIOS_SUCCESSFUL)
		return 0;

	/* TODO walk DVSEC to find component register base */
	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE);
	if (rc) {
		dev_err(dev, "failed to add dport: %s (%d)\n",
			dev_name(&pdev->dev), rc);
		ctx->error = rc;
		return rc;
	}
	ctx->count++;

	dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

	return 0;
}

static int mock_add_root_port(struct platform_device *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	struct device *dev = ctx->dev;
	int rc;

	rc = cxl_add_dport(port, &pdev->dev, pdev->id, CXL_RESOURCE_NONE);
	if (rc) {
		dev_err(dev, "failed to add dport: %s (%d)\n",
			dev_name(&pdev->dev), rc);
		ctx->error = rc;
		return rc;
	}
	ctx->count++;

	dev_dbg(dev, "add dport%d: %s\n", pdev->id, dev_name(&pdev->dev));

	return 0;
}

int match_add_root_ports(struct pci_dev *dev, void *data)
{
	int index, rc;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
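	/*
	 * On the mock path @dev actually points at a platform_device
	 * (see __wrap_pci_walk_bus() in test/mock.c); the cast below is
	 * only acted on once is_mock_port() confirms that.
	 */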
	struct platform_device *pdev = (struct platform_device *) dev;

	if (ops && ops->is_mock_port(pdev))
		rc = mock_add_root_port(pdev, data);
	else
		rc = match_add_root_port(dev, data);

	put_cxl_mock_ops(index);

	return rc;
}
tools/testing/cxl/mock_pmem.c (new file, 24 lines)
@@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <cxl.h>
#include "test/mock.h"
#include <core/core.h>

int match_nvdimm_bridge(struct device *dev, const void *data)
{
	int index, rc = 0;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	const struct cxl_nvdimm *cxl_nvd = data;

	if (ops) {
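		/*
		 * Pair like with like: a mock cxl_nvdimm may only match
		 * a bridge that is itself hosted by a mock device, and
		 * a real cxl_nvdimm only a real bridge.
		 */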
		if (dev->type == &cxl_nvdimm_bridge_type &&
		    (ops->is_mock_dev(dev->parent->parent) ==
		     ops->is_mock_dev(cxl_nvd->dev.parent->parent)))
			rc = 1;
	} else
		rc = dev->type == &cxl_nvdimm_bridge_type;

	put_cxl_mock_ops(index);

	return rc;
}
tools/testing/cxl/test/Kbuild (new file, 10 lines)
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -I$(srctree)/drivers/cxl/

obj-m += cxl_test.o
obj-m += cxl_mock.o
obj-m += cxl_mock_mem.o

cxl_test-y := cxl.o
cxl_mock-y := mock.o
cxl_mock_mem-y := mem.o
tools/testing/cxl/test/cxl.c (new file, 576 lines)
@@ -0,0 +1,576 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include "mock.h"

#define NR_CXL_HOST_BRIDGES 4
#define NR_CXL_ROOT_PORTS 2

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
static struct platform_device
	*cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
struct platform_device *cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];

static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
	[2] = {
		.handle = &host_bridge[2],
	},
	[3] = {
		.handle = &host_bridge[3],
	},
};

static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}
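
/*
 * Mock CEDT: one CHBS per emulated host bridge, plus four CFMWS
 * windows: a single-target and a 4-way interleaved window for
 * volatile memory, then the same pair again for pmem
 * (interleave_ways holds the CXL-encoded exponent, 2^n targets).
 */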
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[4];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[4];
	} cfmws3;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 2,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 4,
		},
		.target = { 0, 1, 2, 3 },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 2,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 4,
		},
		.target = { 0, 1, 2, 3 },
	},
};

struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;

static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}

static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = SZ_256M,
	};
	unsigned long phys;

	INIT_LIST_HEAD(&res->list);
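	/* carve a 256M-aligned range out of the shared mock address pool */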
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys)
		return NULL;

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}

static int populate_cedt(void)
{
	struct acpi_cedt_cfmws *cfmws[4] = {
		[0] = &mock_cedt.cfmws0.cfmws,
		[1] = &mock_cedt.cfmws1.cfmws,
		[2] = &mock_cedt.cfmws2.cfmws,
		[3] = &mock_cedt.cfmws3.cfmws,
	};
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = 0; i < ARRAY_SIZE(cfmws); i++) {
		struct acpi_cedt_cfmws *window = cfmws[i];

		res = alloc_mock_res(window->window_size);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}

static acpi_status mock_acpi_get_table(char *signature, u32 instance,
				       struct acpi_table_header **out_table)
{
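	/*
	 * Only intercept CEDT lookups that pass instance == U32_MAX;
	 * every other request goes to the real ACPI tables.
	 */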
	if (instance < U32_MAX || strcmp(signature, ACPI_SIG_CEDT) != 0)
		return acpi_get_table(signature, instance, out_table);

	*out_table = (struct acpi_table_header *) &mock_cedt;
	return AE_OK;
}

static void mock_acpi_put_table(struct acpi_table_header *table)
{
	if (table == (struct acpi_table_header *) &mock_cedt)
		return;
	acpi_put_table(table);
}

static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;

	return false;
}

static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}

static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}

static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES];
static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},
};

static struct platform_device *mock_cxl_root_port(struct pci_bus *bus, int index)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return cxl_root_port[index + i * NR_CXL_ROOT_PORTS];
	return NULL;
}

static bool is_mock_port(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (pdev == cxl_root_port[i])
			return true;
	return false;
}

static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}

static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.mock_port = mock_cxl_root_port,
	.acpi_get_table = mock_acpi_get_table,
	.acpi_put_table = mock_acpi_put_table,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};

static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif

static struct platform_device *alloc_memdev(int id)
{
	struct resource res[] = {
		[0] = {
			.flags = IORESOURCE_MEM,
		},
		[1] = {
			.flags = IORESOURCE_MEM,
			.desc = IORES_DESC_PERSISTENT_MEMORY,
		},
	};
	struct platform_device *pdev;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(res); i++) {
		struct cxl_mock_res *r = alloc_mock_res(SZ_256M);

		if (!r)
			return NULL;
		res[i].start = r->range.start;
		res[i].end = r->range.end;
	}

	pdev = platform_device_alloc("cxl_mem", id);
	if (!pdev)
		return NULL;

	rc = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
	if (rc)
		goto err;

	return pdev;

err:
	platform_device_put(pdev);
	return NULL;
}

static __init int cxl_test_init(void)
{
	int rc, i;

	register_cxl_mock_ops(&cxl_mock_ops);

	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
	if (!cxl_mock_pool) {
		rc = -ENOMEM;
		goto err_gen_pool_create;
	}

	rc = gen_pool_add(cxl_mock_pool, SZ_512G, SZ_64G, NUMA_NO_NODE);
	if (rc)
		goto err_gen_pool_add;

	rc = populate_cedt();
	if (rc)
		goto err_populate;
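
	/*
	 * Build the emulated topology bottom-up: NR_CXL_HOST_BRIDGES
	 * host bridges, NR_CXL_ROOT_PORTS root ports per bridge, and
	 * one mock memdev behind each root port.
	 */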
	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", i);
		if (!pdev)
			goto err_bridge;

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}
		cxl_host_bridge[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
		struct platform_device *bridge =
			cxl_host_bridge[i / NR_CXL_ROOT_PORTS];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port", i);
		if (!pdev)
			goto err_port;
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_port[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_root_port));
	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *port = cxl_root_port[i];
		struct platform_device *pdev;

		pdev = alloc_memdev(i);
		if (!pdev)
			goto err_mem;
		pdev->dev.parent = &port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
	if (!cxl_acpi)
		goto err_mem;

	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
	acpi0017_mock.dev.bus = &platform_bus_type;

	rc = platform_device_add(cxl_acpi);
	if (rc)
		goto err_add;

	return 0;

err_add:
	platform_device_put(cxl_acpi);
err_mem:
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--)
		platform_device_unregister(cxl_host_bridge[i]);
err_populate:
	depopulate_all_mock_resources();
err_gen_pool_add:
	gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
	unregister_cxl_mock_ops(&cxl_mock_ops);
	return rc;
}

static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--)
		platform_device_unregister(cxl_host_bridge[i]);
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}

module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
tools/testing/cxl/test/mem.c (new file, 256 lines)
@@ -0,0 +1,256 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/bits.h>
#include <cxlmem.h>

#define LSA_SIZE SZ_128K
#define EFFECT(x) (1U << x)

static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
	},
};

static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};

static int mock_gsl(struct cxl_mbox_cmd *cmd)
{
	if (cmd->size_out < sizeof(mock_gsl_payload))
		return -EINVAL;

	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
	cmd->size_out = sizeof(mock_gsl_payload);

	return 0;
}

static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_log *gl = cmd->payload_in;
	u32 offset = le32_to_cpu(gl->offset);
	u32 length = le32_to_cpu(gl->length);
	uuid_t uuid = DEFINE_CXL_CEL_UUID;
	void *data = &mock_cel;

	if (cmd->size_in < sizeof(*gl))
		return -EINVAL;
	if (length > cxlm->payload_size)
		return -EINVAL;
	if (offset + length > sizeof(mock_cel))
		return -EINVAL;
	if (!uuid_equal(&gl->uuid, &uuid))
		return -EINVAL;
	if (length > cmd->size_out)
		return -EINVAL;

	memcpy(cmd->payload_out, data + offset, length);

	return 0;
}

static int mock_id(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	struct platform_device *pdev = to_platform_device(cxlm->dev);
	struct cxl_mbox_identify id = {
		.fw_revision = { "mock fw v1 " },
		.lsa_size = cpu_to_le32(LSA_SIZE),
		/* FIXME: Add partition support */
		.partition_align = cpu_to_le64(0),
	};
	u64 capacity = 0;
	int i;

	if (cmd->size_out < sizeof(id))
		return -EINVAL;
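
	/*
	 * Derive capacity from the memdev's two platform resources
	 * (one volatile, one persistent); Identify reports sizes in
	 * units of CXL_CAPACITY_MULTIPLIER.
	 */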
	for (i = 0; i < 2; i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break;

		capacity += resource_size(res) / CXL_CAPACITY_MULTIPLIER;

		if (le64_to_cpu(id.partition_align))
			continue;

		if (res->desc == IORES_DESC_PERSISTENT_MEMORY)
			id.persistent_capacity = cpu_to_le64(
				resource_size(res) / CXL_CAPACITY_MULTIPLIER);
		else
			id.volatile_capacity = cpu_to_le64(
				resource_size(res) / CXL_CAPACITY_MULTIPLIER);
	}

	id.total_capacity = cpu_to_le64(capacity);

	memcpy(cmd->payload_out, &id, sizeof(id));

	return 0;
}

static int mock_get_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
	void *lsa = dev_get_drvdata(cxlm->dev);
	u32 offset, length;

	if (sizeof(*get_lsa) > cmd->size_in)
		return -EINVAL;
	offset = le32_to_cpu(get_lsa->offset);
	length = le32_to_cpu(get_lsa->length);
	if (offset + length > LSA_SIZE)
		return -EINVAL;
	if (length > cmd->size_out)
		return -EINVAL;

	memcpy(cmd->payload_out, lsa + offset, length);
	return 0;
}

static int mock_set_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
	void *lsa = dev_get_drvdata(cxlm->dev);
	u32 offset, length;

	if (sizeof(*set_lsa) > cmd->size_in)
		return -EINVAL;
	offset = le32_to_cpu(set_lsa->offset);
	length = cmd->size_in - sizeof(*set_lsa);
	if (offset + length > LSA_SIZE)
		return -EINVAL;

	memcpy(lsa + offset, &set_lsa->data[0], length);
	return 0;
}

static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	struct device *dev = cxlm->dev;
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(cxlm, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		rc = mock_id(cxlm, cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(cxlm, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(cxlm, cmd);
		break;
	default:
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}

static void label_area_release(void *lsa)
{
	vfree(lsa);
}

static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	void *lsa;
	int rc;

	lsa = vmalloc(LSA_SIZE);
	if (!lsa)
		return -ENOMEM;
	rc = devm_add_action_or_reset(dev, label_area_release, lsa);
	if (rc)
		return rc;
	dev_set_drvdata(dev, lsa);

	cxlm = cxl_mem_create(dev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	cxlm->mbox_send = cxl_mock_mbox_send;
	cxlm->payload_size = SZ_4K;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlm);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(dev, cxlmd);

	return 0;
}

static const struct platform_device_id cxl_mock_mem_ids[] = {
	{ .name = "cxl_mem", },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);

static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};

module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
tools/testing/cxl/test/mock.c (new file, 171 lines)
@@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/libnvdimm.h>
#include <linux/rculist.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "mock.h"

static LIST_HEAD(mock);

void register_cxl_mock_ops(struct cxl_mock_ops *ops)
{
	list_add_rcu(&ops->list, &mock);
}
EXPORT_SYMBOL_GPL(register_cxl_mock_ops);

static DEFINE_SRCU(cxl_mock_srcu);

void unregister_cxl_mock_ops(struct cxl_mock_ops *ops)
{
	list_del_rcu(&ops->list);
	synchronize_srcu(&cxl_mock_srcu);
}
EXPORT_SYMBOL_GPL(unregister_cxl_mock_ops);

struct cxl_mock_ops *get_cxl_mock_ops(int *index)
{
	*index = srcu_read_lock(&cxl_mock_srcu);
	return list_first_or_null_rcu(&mock, struct cxl_mock_ops, list);
}
EXPORT_SYMBOL_GPL(get_cxl_mock_ops);

void put_cxl_mock_ops(int index)
{
	srcu_read_unlock(&cxl_mock_srcu, index);
}
EXPORT_SYMBOL_GPL(put_cxl_mock_ops);

bool __wrap_is_acpi_device_node(const struct fwnode_handle *fwnode)
{
	struct acpi_device *adev =
		container_of(fwnode, struct acpi_device, fwnode);
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	bool retval = false;

	if (ops)
		retval = ops->is_mock_adev(adev);

	if (!retval)
		retval = is_acpi_device_node(fwnode);

	put_cxl_mock_ops(index);
	return retval;
}
EXPORT_SYMBOL(__wrap_is_acpi_device_node);

acpi_status __wrap_acpi_get_table(char *signature, u32 instance,
				  struct acpi_table_header **out_table)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	acpi_status status;

	if (ops)
		status = ops->acpi_get_table(signature, instance, out_table);
	else
		status = acpi_get_table(signature, instance, out_table);

	put_cxl_mock_ops(index);

	return status;
}
EXPORT_SYMBOL(__wrap_acpi_get_table);

void __wrap_acpi_put_table(struct acpi_table_header *table)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops)
		ops->acpi_put_table(table);
	else
		acpi_put_table(table);
	put_cxl_mock_ops(index);
}
EXPORT_SYMBOL(__wrap_acpi_put_table);

acpi_status __wrap_acpi_evaluate_integer(acpi_handle handle,
					 acpi_string pathname,
					 struct acpi_object_list *arguments,
					 unsigned long long *data)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	acpi_status status;

	if (ops)
		status = ops->acpi_evaluate_integer(handle, pathname, arguments,
						    data);
	else
		status = acpi_evaluate_integer(handle, pathname, arguments,
					       data);
	put_cxl_mock_ops(index);

	return status;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_integer);

struct acpi_pci_root *__wrap_acpi_pci_find_root(acpi_handle handle)
{
	int index;
	struct acpi_pci_root *root;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops)
		root = ops->acpi_pci_find_root(handle);
	else
		root = acpi_pci_find_root(handle);

	put_cxl_mock_ops(index);

	return root;
}
EXPORT_SYMBOL_GPL(__wrap_acpi_pci_find_root);

void __wrap_pci_walk_bus(struct pci_bus *bus,
			 int (*cb)(struct pci_dev *, void *), void *userdata)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops && ops->is_mock_bus(bus)) {
		int rc, i;

		/*
		 * Simulate 2 root ports per host-bridge and no
		 * depth recursion.
		 */
		for (i = 0; i < 2; i++) {
			rc = cb((struct pci_dev *) ops->mock_port(bus, i),
				userdata);
			if (rc)
				break;
		}
	} else
		pci_walk_bus(bus, cb, userdata);

	put_cxl_mock_ops(index);
}
EXPORT_SYMBOL_GPL(__wrap_pci_walk_bus);

struct nvdimm_bus *
__wrap_nvdimm_bus_register(struct device *dev,
			   struct nvdimm_bus_descriptor *nd_desc)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops && ops->is_mock_dev(dev->parent->parent))
		nd_desc->provider_name = "cxl_test";
	put_cxl_mock_ops(index);

	return nvdimm_bus_register(dev, nd_desc);
}
EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register);

MODULE_LICENSE("GPL v2");
tools/testing/cxl/test/mock.h (new file, 27 lines)
@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/list.h>
#include <linux/acpi.h>

struct cxl_mock_ops {
	struct list_head list;
	bool (*is_mock_adev)(struct acpi_device *dev);
	acpi_status (*acpi_get_table)(char *signature, u32 instance,
				      struct acpi_table_header **out_table);
	void (*acpi_put_table)(struct acpi_table_header *table);
	bool (*is_mock_bridge)(struct device *dev);
	acpi_status (*acpi_evaluate_integer)(acpi_handle handle,
					     acpi_string pathname,
					     struct acpi_object_list *arguments,
					     unsigned long long *data);
	struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle);
	struct platform_device *(*mock_port)(struct pci_bus *bus, int index);
	bool (*is_mock_bus)(struct pci_bus *bus);
	bool (*is_mock_port)(struct platform_device *pdev);
	bool (*is_mock_dev)(struct device *dev);
};

void register_cxl_mock_ops(struct cxl_mock_ops *ops);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops);
struct cxl_mock_ops *get_cxl_mock_ops(int *index);
void put_cxl_mock_ops(int index);
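
Every wrapper brackets its use of the ops with the SRCU get/put pair; a sketch of the calling convention, mirroring the wrappers in test/mock.c above:

	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops && ops->is_mock_dev(dev))
		;	/* take the mock path */
	put_cxl_mock_ops(index);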