2021-02-17 12:09:52 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
|
|
|
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
|
2021-05-14 13:22:05 +08:00
|
|
|
#include <linux/io-64-nonatomic-lo-hi.h>
|
cxl/region: Add region creation support
CXL 2.0 allows for dynamic provisioning of new memory regions (system
physical address resources like "System RAM" and "Persistent Memory").
Whereas DDR and PMEM resources are conveyed statically at boot, CXL
allows for assembling and instantiating new regions from the available
capacity of CXL memory expanders in the system.
Sysfs with an "echo $region_name > $create_region_attribute" interface
is chosen as the mechanism to initiate the provisioning process. This
was chosen over ioctl() and netlink() to keep the configuration
interface entirely in a pseudo-fs interface, and it was chosen over
configfs since, aside from this one creation event, the interface is
read-mostly. I.e. configfs supports cases where an object is designed to
be provisioned each boot, like an iSCSI storage target, and CXL region
creation is mostly for PMEM regions which are created usually once
per-lifetime of a server instance. This is an improvement over nvdimm
that pre-created "seed" devices that tended to confuse users looking to
determine which devices are active and which are idle.
Recall that the major change that CXL brings over previous persistent
memory architectures is the ability to dynamically define new regions.
Compare that to drivers like 'nfit' where the region configuration is
statically defined by platform firmware.
Regions are created as a child of a root decoder that encompasses an
address space with constraints. When created through sysfs, the root
decoder is explicit. When created from an LSA's region structure a root
decoder will possibly need to be inferred by the driver.
Upon region creation through sysfs, a vacant region is created with a
unique name. Regions have a number of attributes that must be configured
before the region can be bound to the driver where HDM decoder program
is completed.
An example of creating a new region:
- Allocate a new region name:
region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
- Create a new region by name:
while
region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
! echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
do true; done
- Region now exists in sysfs:
stat -t /sys/bus/cxl/devices/decoder0.0/$region
- Delete the region, and name:
echo $region > /sys/bus/cxl/devices/decoder0.0/delete_region
Signed-off-by: Ben Widawsky <bwidawsk@kernel.org>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784333909.1758207.794374602146306032.stgit@dwillia2-xfh.jf.intel.com
[djbw: simplify locking, reword changelog]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-06-09 01:28:34 +08:00
|
|
|
#include <linux/memregion.h>
|
2022-02-04 23:18:31 +08:00
|
|
|
#include <linux/workqueue.h>
|
2022-07-11 00:57:28 +08:00
|
|
|
#include <linux/debugfs.h>
|
2021-02-17 12:09:52 +08:00
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/module.h>
|
2021-06-04 08:50:36 +08:00
|
|
|
#include <linux/pci.h>
|
2021-06-10 00:01:35 +08:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/idr.h>
|
2021-08-03 01:29:38 +08:00
|
|
|
#include <cxlmem.h>
|
2022-02-04 23:08:40 +08:00
|
|
|
#include <cxlpci.h>
|
2021-08-03 01:29:38 +08:00
|
|
|
#include <cxl.h>
|
2021-08-03 01:29:49 +08:00
|
|
|
#include "core.h"
|
2021-02-17 12:09:52 +08:00
|
|
|
|
|
|
|
/**
|
2021-05-14 13:22:00 +08:00
|
|
|
* DOC: cxl core
|
2021-02-17 12:09:52 +08:00
|
|
|
*
|
2021-08-03 01:29:43 +08:00
|
|
|
* The CXL core provides a set of interfaces that can be consumed by CXL aware
|
|
|
|
* drivers. The interfaces allow for creation, modification, and destruction of
|
|
|
|
* regions, memory devices, ports, and decoders. CXL aware drivers must register
|
|
|
|
* with the CXL core via these interfaces in order to be able to participate in
|
|
|
|
* cross-device interleave coordination. The CXL core also establishes and
|
|
|
|
* maintains the bridge to the nvdimm subsystem.
|
|
|
|
*
|
|
|
|
* CXL core introduces sysfs hierarchy to control the devices that are
|
|
|
|
* instantiated by the core.
|
2021-02-17 12:09:52 +08:00
|
|
|
*/
|
2021-05-14 13:22:00 +08:00
|
|
|
|
2021-06-10 00:01:35 +08:00
|
|
|
static DEFINE_IDA(cxl_port_ida);
|
2022-02-01 00:44:52 +08:00
|
|
|
static DEFINE_XARRAY(cxl_root_buses);
|
2021-06-10 00:01:35 +08:00
|
|
|
|
|
|
|
/* sysfs "devtype" attribute: report the name of this device's device_type. */
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);
|
|
|
|
|
2022-01-24 08:30:41 +08:00
|
|
|
/*
 * Map a CXL device to its CXL_DEVICE_* id for modalias generation and
 * driver matching. Returns 0 when the device has no matching id.
 */
static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (is_cxl_port(dev)) {
		/* root ports are distinguished from intermediate ports */
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	return 0;
}
|
|
|
|
|
|
|
|
/* sysfs "modalias" attribute: emit the CXL modalias string for this device. */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);
|
|
|
|
|
2021-06-10 00:01:35 +08:00
|
|
|
/* Attributes common to every device registered on the CXL bus. */
static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};
|
|
|
|
|
2021-06-10 00:43:29 +08:00
|
|
|
/* sysfs "start": base of the decoder's host physical address range (admin-only). */
static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);
|
2021-06-10 00:43:29 +08:00
|
|
|
|
|
|
|
/* sysfs "size": length of the decoder's host physical address range. */
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);
|
|
|
|
|
|
|
|
/*
 * Generate a read-only sysfs attribute @name that reports "1"/"0"
 * depending on whether @flag is set in the decoder's flags.
 */
#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
								     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
|
|
|
|
|
|
|
|
/*
 * sysfs "target_type": "accelerator" (type-2) or "expander" (type-3).
 * Returns -ENXIO if the decoder carries an unrecognized target type.
 */
static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);
|
|
|
|
|
2022-05-19 08:52:23 +08:00
|
|
|
/*
 * Emit the comma-separated list of downstream-port ids targeted by a
 * switch decoder, one per interleave way, stopping at the first empty
 * slot. Returns the number of bytes written, or a negative errno from
 * sysfs_emit_at().
 */
static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		/* peek at the next slot to decide whether a ',' follows */
		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}
|
|
|
|
|
|
|
|
/*
 * sysfs "target_list": snapshot the decoder's target list under the
 * target_lock seqlock so a concurrent target update cannot yield a torn
 * read; retry until a stable snapshot is obtained, then append a newline.
 */
static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		rc = emit_target_list(cxlsd, buf);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
|
|
|
|
|
2022-05-24 03:15:35 +08:00
|
|
|
/* sysfs "mode" (read): emit the endpoint decoder's mode name (e.g. pmem/ram). */
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}
|
2022-05-24 09:02:30 +08:00
|
|
|
|
|
|
|
/*
 * sysfs "mode" (write): accept only "pmem" or "ram" and forward the
 * selection to cxl_dpa_set_mode(). Returns @len on success, -EINVAL for
 * any other input, or the error from cxl_dpa_set_mode().
 */
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);
|
|
|
|
|
|
|
|
/* sysfs "dpa_resource": start of the device physical address allocation. */
static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	u64 base = cxl_dpa_resource_start(cxled);

	return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);
|
|
|
|
|
|
|
|
/* sysfs "dpa_size" (read): size of the device physical address allocation. */
static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}
|
|
|
|
|
|
|
|
/*
 * sysfs "dpa_size" (write): resize the decoder's device physical address
 * allocation. The requested size must be 256M-aligned. The existing
 * allocation is always freed first; writing 0 just frees it, any other
 * value triggers a fresh allocation of that size.
 */
static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	/* allocations are granted in 256M-aligned chunks */
	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);
|
2022-05-24 03:15:35 +08:00
|
|
|
|
2022-04-11 06:26:13 +08:00
|
|
|
/* sysfs "interleave_granularity": decoder's interleave granularity in bytes. */
static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);

/* sysfs "interleave_ways": number of ways the decoder interleaves across. */
static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);
|
|
|
|
|
2021-06-10 00:43:29 +08:00
|
|
|
/* Attributes shared by all decoder types (root, switch, endpoint). */
static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};
|
|
|
|
|
|
|
|
/*
 * Attributes specific to root decoders: decoder capability flags plus the
 * region create/delete entry points (compiled in via SET_CXL_REGION_ATTR,
 * which expands to nothing when region support is disabled).
 */
static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};
|
|
|
|
|
cxl/region: Add region creation support
CXL 2.0 allows for dynamic provisioning of new memory regions (system
physical address resources like "System RAM" and "Persistent Memory").
Whereas DDR and PMEM resources are conveyed statically at boot, CXL
allows for assembling and instantiating new regions from the available
capacity of CXL memory expanders in the system.
Sysfs with an "echo $region_name > $create_region_attribute" interface
is chosen as the mechanism to initiate the provisioning process. This
was chosen over ioctl() and netlink() to keep the configuration
interface entirely in a pseudo-fs interface, and it was chosen over
configfs since, aside from this one creation event, the interface is
read-mostly. I.e. configfs supports cases where an object is designed to
be provisioned each boot, like an iSCSI storage target, and CXL region
creation is mostly for PMEM regions which are created usually once
per-lifetime of a server instance. This is an improvement over nvdimm
that pre-created "seed" devices that tended to confuse users looking to
determine which devices are active and which are idle.
Recall that the major change that CXL brings over previous persistent
memory architectures is the ability to dynamically define new regions.
Compare that to drivers like 'nfit' where the region configuration is
statically defined by platform firmware.
Regions are created as a child of a root decoder that encompasses an
address space with constraints. When created through sysfs, the root
decoder is explicit. When created from an LSA's region structure a root
decoder will possibly need to be inferred by the driver.
Upon region creation through sysfs, a vacant region is created with a
unique name. Regions have a number of attributes that must be configured
before the region can be bound to the driver where HDM decoder program
is completed.
An example of creating a new region:
- Allocate a new region name:
region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
- Create a new region by name:
while
region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
! echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
do true; done
- Region now exists in sysfs:
stat -t /sys/bus/cxl/devices/decoder0.0/$region
- Delete the region, and name:
echo $region > /sys/bus/cxl/devices/decoder0.0/delete_region
Signed-off-by: Ben Widawsky <bwidawsk@kernel.org>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784333909.1758207.794374602146306032.stgit@dwillia2-xfh.jf.intel.com
[djbw: simplify locking, reword changelog]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-06-09 01:28:34 +08:00
|
|
|
/* A root decoder can host pmem regions only if it is both TYPE3- and PMEM-capable. */
static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}
|
|
|
|
|
|
|
|
/*
 * Hide the create_pmem_region/delete_region attributes on root decoders
 * that cannot host pmem regions; otherwise keep the attribute's
 * default mode.
 */
static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
		return 0;

	return a->mode;
}
|
|
|
|
|
2021-06-10 00:43:29 +08:00
|
|
|
/* Root-decoder attribute group; visibility gated per-attribute. */
static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
|
|
|
|
|
|
|
|
/* Attributes specific to switch decoders. */
static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
|
|
|
|
|
2022-02-03 12:02:06 +08:00
|
|
|
/* Attributes specific to endpoint decoders (mode and DPA allocation controls). */
static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
|
|
|
|
|
2022-05-19 08:52:23 +08:00
|
|
|
/*
 * Common decoder teardown: return the decoder id to the parent port's
 * ida and drop the port reference taken when the decoder was created.
 */
static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}
|
|
|
|
|
2022-05-22 07:24:14 +08:00
|
|
|
/* device release for endpoint decoders: common teardown then free the object. */
static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

/* device release for switch decoders: common teardown then free the object. */
static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}
|
|
|
|
|
2022-07-13 09:38:26 +08:00
|
|
|
/*
 * Cast a device to its containing cxl_root_decoder; WARNs and returns
 * NULL if @dev is not a root decoder.
 */
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
|
|
|
|
|
|
|
|
/*
 * device release for root decoders: return the region id (if one was
 * allocated; a negative id means none), then common teardown and free.
 */
static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}
|
|
|
|
|
2022-02-03 12:02:06 +08:00
|
|
|
/* device_type definitions for the three decoder flavors. */
static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};
|
|
|
|
|
2022-03-05 05:36:45 +08:00
|
|
|
/* True if @dev is an endpoint decoder. */
bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}

/* True if @dev is a root decoder. */
bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

/* True if @dev is a switch decoder; root decoders count as switch decoders. */
bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
|
|
|
|
|
2021-06-10 00:43:29 +08:00
|
|
|
/*
 * Cast a device to its containing cxl_decoder; WARNs and returns NULL if
 * @dev is not any decoder type (switch, root, or endpoint).
 */
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
|
2021-06-10 00:43:29 +08:00
|
|
|
|
2022-05-22 07:24:14 +08:00
|
|
|
/* Cast to cxl_endpoint_decoder; WARNs and returns NULL on type mismatch. */
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

/* Cast to cxl_switch_decoder; WARNs and returns NULL on type mismatch. */
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
|
|
|
|
|
2022-02-04 23:08:40 +08:00
|
|
|
static void cxl_ep_release(struct cxl_ep *ep)
|
|
|
|
{
|
|
|
|
put_device(ep->ep);
|
|
|
|
kfree(ep);
|
|
|
|
}
|
|
|
|
|
2022-05-28 01:58:26 +08:00
|
|
|
static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
|
|
|
|
{
|
|
|
|
if (!ep)
|
|
|
|
return;
|
|
|
|
xa_erase(&port->endpoints, (unsigned long) ep->ep);
|
|
|
|
cxl_ep_release(ep);
|
|
|
|
}
|
|
|
|
|
2021-06-10 00:01:35 +08:00
|
|
|
/*
 * Final teardown for a cxl_port device: runs when the last reference to
 * port->dev is dropped. Reaps any endpoint trackers that are still
 * registered, destroys the port's xarrays, returns the port id to the
 * allocator, and frees the port itself.
 */
static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	/* drop any endpoint references that survived to release time */
	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	/* dports entries are devm-managed; only the xarray itself remains */
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}
|
|
|
|
|
|
|
|
/* sysfs attribute groups common to all cxl_port devices */
static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

/* device_type shared by root, switch, and endpoint cxl_port instances */
static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};
|
|
|
|
|
2022-02-01 03:50:09 +08:00
|
|
|
bool is_cxl_port(struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->type == &cxl_port_type;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
|
|
|
|
|
2021-06-10 00:01:35 +08:00
|
|
|
struct cxl_port *to_cxl_port(struct device *dev)
|
|
|
|
{
|
|
|
|
if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
|
|
|
|
"not a cxl_port device\n"))
|
|
|
|
return NULL;
|
|
|
|
return container_of(dev, struct cxl_port, dev);
|
|
|
|
}
|
2022-02-01 03:50:09 +08:00
|
|
|
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
|
2021-06-10 00:01:35 +08:00
|
|
|
|
2021-06-10 00:01:46 +08:00
|
|
|
/*
 * devm action: tear down a cxl_port registered by __devm_cxl_add_port().
 *
 * Marks the port dead before device_unregister() so that concurrent
 * cxl_add_ep() attempts observe the teardown and fail, then drops the
 * registration reference.
 */
static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	/* the root port has no cxl_port parent to derive a lock from */
	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root port's and the first level of ports are unregistered
	 * under the platform firmware device lock, all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	/* teardown must run under the lock selected above */
	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}
|
|
|
|
|
|
|
|
static void cxl_unlink_uport(void *_port)
|
|
|
|
{
|
|
|
|
struct cxl_port *port = _port;
|
|
|
|
|
|
|
|
sysfs_remove_link(&port->dev.kobj, "uport");
|
|
|
|
}
|
|
|
|
|
|
|
|
static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return devm_add_action_or_reset(host, cxl_unlink_uport, port);
|
|
|
|
}
|
|
|
|
|
2023-01-26 07:32:57 +08:00
|
|
|
static void cxl_unlink_parent_dport(void *_port)
|
|
|
|
{
|
|
|
|
struct cxl_port *port = _port;
|
|
|
|
|
|
|
|
sysfs_remove_link(&port->dev.kobj, "parent_dport");
|
|
|
|
}
|
|
|
|
|
|
|
|
static int devm_cxl_link_parent_dport(struct device *host,
|
|
|
|
struct cxl_port *port,
|
|
|
|
struct cxl_dport *parent_dport)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (!parent_dport)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport->kobj,
|
|
|
|
"parent_dport");
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
|
|
|
|
}
|
|
|
|
|
2022-04-21 23:33:13 +08:00
|
|
|
/* lockdep key: cxl_port device_lock()s nest by hierarchy depth (subclass) */
static struct lock_class_key cxl_port_key;
|
|
|
|
|
2021-06-10 00:01:35 +08:00
|
|
|
/*
 * Allocate and initialize (but do not add) a cxl_port.
 *
 * @uport: "physical" device backing this upstream port
 * @component_reg_phys: base of the port's component registers, or
 *                      CXL_RESOURCE_NONE
 * @parent_dport: upstream dport this port hangs off of; NULL for the root
 *
 * Returns the initialized port on success, or an ERR_PTR(). The caller
 * owns a reference via device_initialize() and is responsible for
 * put_device() / device_add() follow-up.
 */
static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;
	port->uport = uport;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else if (parent_dport->rch)
			/* RCH topologies: the dport itself is the bridge */
			port->host_bridge = parent_dport->dport;
		else
			port->host_bridge = iter->uport;
		dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
	} else
		dev->parent = uport;

	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	/* -1 == "no decoders allocated / committed yet" */
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	/* nest port locks by depth so lockdep tolerates parent->child walks */
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}
|
|
|
|
|
2022-10-18 21:23:31 +08:00
|
|
|
/*
 * Allocate, name, and register a cxl_port, arranging devm-based teardown
 * on @host.
 *
 * Naming encodes the port's role: "endpoint%d" for memdev-backed ports,
 * "port%d" for switch ports (those with a parent dport), and "root%d"
 * for the platform root. Note that once device_add() has succeeded,
 * failure exits rely on the devm unregister_port action rather than
 * put_device() (the action ordering here is depended upon by
 * delete_switch_port()).
 */
static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_dport)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	/* pre-device_add() failure: drop the allocation reference directly */
	put_device(dev);
	return ERR_PTR(rc);
}
|
2022-10-18 21:23:31 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
|
|
|
|
* @host: host device for devm operations
|
|
|
|
* @uport: "physical" device implementing this upstream port
|
|
|
|
* @component_reg_phys: (optional) for configurable cxl_port instances
|
|
|
|
* @parent_dport: next hop up in the CXL memory decode hierarchy
|
|
|
|
*/
|
|
|
|
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
|
|
|
|
resource_size_t component_reg_phys,
|
|
|
|
struct cxl_dport *parent_dport)
|
|
|
|
{
|
|
|
|
struct cxl_port *port, *parent_port;
|
|
|
|
|
|
|
|
port = __devm_cxl_add_port(host, uport, component_reg_phys,
|
|
|
|
parent_dport);
|
|
|
|
|
|
|
|
parent_port = parent_dport ? parent_dport->port : NULL;
|
|
|
|
if (IS_ERR(port)) {
|
|
|
|
dev_dbg(uport, "Failed to add %s%s%s%s: %ld\n",
|
|
|
|
dev_name(&port->dev),
|
|
|
|
parent_port ? " to " : "",
|
|
|
|
parent_port ? dev_name(&parent_port->dev) : "",
|
|
|
|
parent_port ? "" : " (root port)",
|
|
|
|
PTR_ERR(port));
|
|
|
|
} else {
|
|
|
|
dev_dbg(uport, "%s added%s%s%s\n",
|
|
|
|
dev_name(&port->dev),
|
|
|
|
parent_port ? " to " : "",
|
|
|
|
parent_port ? dev_name(&parent_port->dev) : "",
|
|
|
|
parent_port ? "" : " (root port)");
|
|
|
|
}
|
|
|
|
|
|
|
|
return port;
|
|
|
|
}
|
2021-11-13 08:32:58 +08:00
|
|
|
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
|
2021-06-10 00:01:35 +08:00
|
|
|
|
2022-02-01 00:44:52 +08:00
|
|
|
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
|
|
|
|
{
|
|
|
|
/* There is no pci_bus associated with a CXL platform-root port */
|
|
|
|
if (is_cxl_root(port))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (dev_is_pci(port->uport)) {
|
|
|
|
struct pci_dev *pdev = to_pci_dev(port->uport);
|
|
|
|
|
|
|
|
return pdev->subordinate;
|
|
|
|
}
|
|
|
|
|
|
|
|
return xa_load(&cxl_root_buses, (unsigned long)port->uport);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
|
|
|
|
|
|
|
|
static void unregister_pci_bus(void *uport)
|
|
|
|
{
|
|
|
|
xa_erase(&cxl_root_buses, (unsigned long)uport);
|
|
|
|
}
|
|
|
|
|
|
|
|
int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
|
|
|
|
struct pci_bus *bus)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (dev_is_pci(uport))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return devm_add_action_or_reset(host, unregister_pci_bus, uport);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
|
|
|
|
|
2022-02-04 23:08:40 +08:00
|
|
|
static bool dev_is_cxl_root_child(struct device *dev)
|
2022-02-01 08:34:40 +08:00
|
|
|
{
|
|
|
|
struct cxl_port *port, *parent;
|
|
|
|
|
|
|
|
if (!is_cxl_port(dev))
|
2022-02-04 23:08:40 +08:00
|
|
|
return false;
|
2022-02-01 08:34:40 +08:00
|
|
|
|
|
|
|
port = to_cxl_port(dev);
|
|
|
|
if (is_cxl_root(port))
|
2022-02-04 23:08:40 +08:00
|
|
|
return false;
|
2022-02-01 08:34:40 +08:00
|
|
|
|
|
|
|
parent = to_cxl_port(port->dev.parent);
|
2022-02-04 23:08:40 +08:00
|
|
|
if (is_cxl_root(parent))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
|
|
|
|
static int match_root_child(struct device *dev, const void *match)
|
|
|
|
{
|
|
|
|
const struct device *iter = NULL;
|
|
|
|
struct cxl_dport *dport;
|
|
|
|
struct cxl_port *port;
|
|
|
|
|
|
|
|
if (!dev_is_cxl_root_child(dev))
|
2022-02-01 08:34:40 +08:00
|
|
|
return 0;
|
|
|
|
|
2022-02-04 23:08:40 +08:00
|
|
|
port = to_cxl_port(dev);
|
2022-05-28 11:51:19 +08:00
|
|
|
iter = match;
|
|
|
|
while (iter) {
|
|
|
|
dport = cxl_find_dport_by_dev(port, iter);
|
|
|
|
if (dport)
|
|
|
|
break;
|
|
|
|
iter = iter->parent;
|
2022-02-01 08:34:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return !!iter;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct cxl_port *find_cxl_root(struct device *dev)
|
|
|
|
{
|
|
|
|
struct device *port_dev;
|
|
|
|
struct cxl_port *root;
|
|
|
|
|
|
|
|
port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
|
|
|
|
if (!port_dev)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
root = to_cxl_port(port_dev->parent);
|
|
|
|
get_device(&root->dev);
|
|
|
|
put_device(port_dev);
|
|
|
|
return root;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
|
|
|
|
|
2021-06-10 00:01:46 +08:00
|
|
|
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
|
|
|
|
{
|
|
|
|
struct cxl_dport *dport;
|
2022-05-28 11:51:19 +08:00
|
|
|
unsigned long index;
|
2021-06-10 00:01:46 +08:00
|
|
|
|
|
|
|
device_lock_assert(&port->dev);
|
2022-05-28 11:51:19 +08:00
|
|
|
xa_for_each(&port->dports, index, dport)
|
2021-06-10 00:01:46 +08:00
|
|
|
if (dport->port_id == id)
|
|
|
|
return dport;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Insert @new into @port's dport xarray, keyed by the dport's device
 * pointer, after rejecting duplicate hardware port ids.
 *
 * Caller must hold the port device lock (see cond_cxl_root_lock() for
 * the root-port case). Returns 0 on success, -EBUSY on a duplicate id,
 * or the xa_insert() error.
 */
static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;
	int rc;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
		return -EBUSY;
	}

	rc = xa_insert(&port->dports, (unsigned long)new->dport, new,
		       GFP_KERNEL);
	if (rc)
		return rc;

	port->nr_dports++;
	return 0;
}
|
|
|
|
|
2022-02-02 05:07:51 +08:00
|
|
|
/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}
|
|
|
|
|
|
|
|
static void cond_cxl_root_unlock(struct cxl_port *port)
|
|
|
|
{
|
|
|
|
if (is_cxl_root(port))
|
2022-04-21 23:33:23 +08:00
|
|
|
device_unlock(&port->dev);
|
2022-02-02 05:07:51 +08:00
|
|
|
}
|
|
|
|
|
2022-02-01 10:10:04 +08:00
|
|
|
static void cxl_dport_remove(void *data)
|
|
|
|
{
|
|
|
|
struct cxl_dport *dport = data;
|
|
|
|
struct cxl_port *port = dport->port;
|
|
|
|
|
2022-05-28 11:51:19 +08:00
|
|
|
xa_erase(&port->dports, (unsigned long) dport->dport);
|
2022-02-01 10:10:04 +08:00
|
|
|
put_device(dport->dport);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cxl_dport_unlink(void *data)
|
|
|
|
{
|
|
|
|
struct cxl_dport *dport = data;
|
|
|
|
struct cxl_port *port = dport->port;
|
|
|
|
char link_name[CXL_TARGET_STRLEN];
|
|
|
|
|
|
|
|
sprintf(link_name, "dport%d", dport->port_id);
|
|
|
|
sysfs_remove_link(&port->dev.kobj, link_name);
|
|
|
|
}
|
|
|
|
|
2022-12-03 16:40:29 +08:00
|
|
|
/*
 * Common worker for devm_cxl_add_dport() / devm_cxl_add_rch_dport().
 *
 * Allocates a cxl_dport against the appropriate devm @host (the platform
 * device for root ports, the port device itself otherwise), registers it
 * in @port's dport xarray, and publishes a "dport%d" symlink. An @rcrb
 * other than CXL_RESOURCE_NONE marks the dport as a Restricted CXL Host
 * (RCH) dport. Returns the dport or an ERR_PTR(); all cleanup past
 * allocation is handled by devm actions registered here.
 */
static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	/* devm actions are useless if the host is already unbinding */
	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;
	if (rcrb != CXL_RESOURCE_NONE)
		dport->rch = true;
	dport->rcrb = rcrb;

	/* root ports have no driver-held lock; take it conditionally */
	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	/* hold @dport_dev for the lifetime of the dport entry */
	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
|
2022-10-18 21:23:32 +08:00
|
|
|
|
|
|
|
/**
|
2022-12-03 16:40:29 +08:00
|
|
|
* devm_cxl_add_dport - append VH downstream port data to a cxl_port
|
2022-10-18 21:23:32 +08:00
|
|
|
* @port: the cxl_port that references this dport
|
|
|
|
* @dport_dev: firmware or PCI device representing the dport
|
|
|
|
* @port_id: identifier for this dport in a decoder's target list
|
|
|
|
* @component_reg_phys: optional location of CXL component registers
|
|
|
|
*
|
|
|
|
* Note that dports are appended to the devm release action's of the
|
|
|
|
* either the port's host (for root ports), or the port itself (for
|
|
|
|
* switch ports)
|
|
|
|
*/
|
|
|
|
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
|
|
|
|
struct device *dport_dev, int port_id,
|
|
|
|
resource_size_t component_reg_phys)
|
|
|
|
{
|
|
|
|
struct cxl_dport *dport;
|
|
|
|
|
|
|
|
dport = __devm_cxl_add_dport(port, dport_dev, port_id,
|
2022-12-03 16:40:29 +08:00
|
|
|
component_reg_phys, CXL_RESOURCE_NONE);
|
2022-10-18 21:23:32 +08:00
|
|
|
if (IS_ERR(dport)) {
|
|
|
|
dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
|
|
|
|
dev_name(&port->dev), PTR_ERR(dport));
|
|
|
|
} else {
|
|
|
|
dev_dbg(dport_dev, "dport added to %s\n",
|
|
|
|
dev_name(&port->dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
return dport;
|
|
|
|
}
|
2022-02-01 10:10:04 +08:00
|
|
|
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
|
2021-06-10 00:01:46 +08:00
|
|
|
|
2022-12-03 16:40:29 +08:00
|
|
|
/**
|
|
|
|
* devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
|
|
|
|
* @port: the cxl_port that references this dport
|
|
|
|
* @dport_dev: firmware or PCI device representing the dport
|
|
|
|
* @port_id: identifier for this dport in a decoder's target list
|
|
|
|
* @component_reg_phys: optional location of CXL component registers
|
|
|
|
* @rcrb: mandatory location of a Root Complex Register Block
|
|
|
|
*
|
|
|
|
* See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
|
|
|
|
*/
|
|
|
|
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
|
|
|
|
struct device *dport_dev, int port_id,
|
|
|
|
resource_size_t component_reg_phys,
|
|
|
|
resource_size_t rcrb)
|
|
|
|
{
|
|
|
|
struct cxl_dport *dport;
|
|
|
|
|
|
|
|
if (rcrb == CXL_RESOURCE_NONE) {
|
|
|
|
dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
dport = __devm_cxl_add_dport(port, dport_dev, port_id,
|
|
|
|
component_reg_phys, rcrb);
|
|
|
|
if (IS_ERR(dport)) {
|
|
|
|
dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
|
|
|
|
dev_name(&port->dev), PTR_ERR(dport));
|
|
|
|
} else {
|
|
|
|
dev_dbg(dport_dev, "RCH dport added to %s\n",
|
|
|
|
dev_name(&port->dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
return dport;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
|
|
|
|
|
2022-05-27 15:56:59 +08:00
|
|
|
/*
 * Insert @new into its port's endpoint xarray under the port lock.
 *
 * A port marked ->dead is mid-teardown (see unregister_port() /
 * cxl_detach_ep()), so refuse with -ENXIO rather than registering an
 * endpoint against a disappearing port. Returns 0 or an xa_insert()
 * error otherwise.
 */
static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cxl_add_ep - register an endpoint's interest in a port
|
2022-05-27 15:56:59 +08:00
|
|
|
* @dport: the dport that routes to @ep_dev
|
2022-02-04 23:08:40 +08:00
|
|
|
* @ep_dev: device representing the endpoint
|
|
|
|
*
|
|
|
|
* Intermediate CXL ports are scanned based on the arrival of endpoints.
|
|
|
|
* When those endpoints depart the port can be destroyed once all
|
|
|
|
* endpoints that care about that port have been removed.
|
|
|
|
*/
|
2022-05-27 15:56:59 +08:00
|
|
|
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
|
2022-02-04 23:08:40 +08:00
|
|
|
{
|
|
|
|
struct cxl_ep *ep;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
|
|
|
|
if (!ep)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ep->ep = get_device(ep_dev);
|
2022-05-27 15:56:59 +08:00
|
|
|
ep->dport = dport;
|
2022-02-04 23:08:40 +08:00
|
|
|
|
2022-05-27 15:56:59 +08:00
|
|
|
rc = add_ep(ep);
|
2022-02-04 23:08:40 +08:00
|
|
|
if (rc)
|
|
|
|
cxl_ep_release(ep);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Search context for __find_cxl_port(): locate the cxl_port that owns a
 * dport for @dport_dev, optionally constrained to children of
 * @parent_port, and optionally return the matched dport via @dport.
 */
struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};
|
|
|
|
|
|
|
|
static int match_port_by_dport(struct device *dev, const void *data)
|
|
|
|
{
|
|
|
|
const struct cxl_find_port_ctx *ctx = data;
|
2022-05-27 15:56:59 +08:00
|
|
|
struct cxl_dport *dport;
|
2022-02-04 23:08:40 +08:00
|
|
|
struct cxl_port *port;
|
|
|
|
|
|
|
|
if (!is_cxl_port(dev))
|
|
|
|
return 0;
|
|
|
|
if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
port = to_cxl_port(dev);
|
2022-05-27 15:56:59 +08:00
|
|
|
dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
|
|
|
|
if (ctx->dport)
|
|
|
|
*ctx->dport = dport;
|
|
|
|
return dport != NULL;
|
2022-02-04 23:08:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct device *dev;
|
|
|
|
|
|
|
|
if (!ctx->dport_dev)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
|
|
|
|
if (dev)
|
|
|
|
return to_cxl_port(dev);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2022-05-27 15:56:59 +08:00
|
|
|
static struct cxl_port *find_cxl_port(struct device *dport_dev,
|
|
|
|
struct cxl_dport **dport)
|
2022-02-04 23:08:40 +08:00
|
|
|
{
|
|
|
|
struct cxl_find_port_ctx ctx = {
|
|
|
|
.dport_dev = dport_dev,
|
2022-05-27 15:56:59 +08:00
|
|
|
.dport = dport,
|
2022-02-04 23:08:40 +08:00
|
|
|
};
|
2022-05-27 15:56:59 +08:00
|
|
|
struct cxl_port *port;
|
2022-02-04 23:08:40 +08:00
|
|
|
|
2022-05-27 15:56:59 +08:00
|
|
|
port = __find_cxl_port(&ctx);
|
|
|
|
return port;
|
2022-02-04 23:08:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
|
2022-05-27 15:56:59 +08:00
|
|
|
struct device *dport_dev,
|
|
|
|
struct cxl_dport **dport)
|
2022-02-04 23:08:40 +08:00
|
|
|
{
|
|
|
|
struct cxl_find_port_ctx ctx = {
|
|
|
|
.dport_dev = dport_dev,
|
|
|
|
.parent_port = parent_port,
|
2022-05-27 15:56:59 +08:00
|
|
|
.dport = dport,
|
2022-02-04 23:08:40 +08:00
|
|
|
};
|
2022-05-27 15:56:59 +08:00
|
|
|
struct cxl_port *port;
|
2022-02-04 23:08:40 +08:00
|
|
|
|
2022-05-27 15:56:59 +08:00
|
|
|
port = __find_cxl_port(&ctx);
|
|
|
|
return port;
|
2022-02-04 23:08:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* All users of grandparent() are using it to walk PCIe-like swich port
|
|
|
|
* hierarchy. A PCIe switch is comprised of a bridge device representing the
|
|
|
|
* upstream switch port and N bridges representing downstream switch ports. When
|
|
|
|
* bridges stack the grand-parent of a downstream switch port is another
|
|
|
|
* downstream switch port in the immediate ancestor switch.
|
|
|
|
*/
|
|
|
|
static struct device *grandparent(struct device *dev)
|
|
|
|
{
|
|
|
|
if (dev && dev->parent)
|
|
|
|
return dev->parent->parent;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2022-02-04 23:18:31 +08:00
|
|
|
/*
 * devm action (registered by cxl_endpoint_autoremove()): tear down the
 * endpoint cxl_port when its backing memdev is removed.
 *
 * The teardown is performed by releasing, in reverse registration order,
 * the devm actions that __devm_cxl_add_port() registered on the parent
 * port — but only while the parent is still bound and the endpoint has
 * not already died via top-down unregistration.
 */
static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	/* lock out concurrent parent unbind while releasing its actions */
	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	device_unlock(parent);
	put_device(parent);
out:
	/* drop the reference taken by cxl_endpoint_autoremove() */
	put_device(&endpoint->dev);
}
|
|
|
|
|
|
|
|
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
|
|
|
|
{
|
|
|
|
struct device *dev = &cxlmd->dev;
|
|
|
|
|
|
|
|
get_device(&endpoint->dev);
|
|
|
|
dev_set_drvdata(dev, endpoint);
|
2023-02-11 09:29:09 +08:00
|
|
|
cxlmd->depth = endpoint->depth;
|
2022-02-04 23:18:31 +08:00
|
|
|
return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
|
|
|
|
|
2022-02-04 23:08:40 +08:00
|
|
|
/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	/* release in reverse of __devm_cxl_add_port() registration order */
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}
|
|
|
|
|
|
|
|
/*
 * Manually retire all of @port's dports ahead of delete_switch_port().
 *
 * Releases each dport's devm actions (symlink then registration, the
 * reverse of __devm_cxl_add_dport() order) and frees the devm-allocated
 * dport itself. Caller must hold the port device lock.
 */
static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}
|
|
|
|
|
2023-02-11 09:29:09 +08:00
|
|
|
/* Search context for port_has_memdev(): match ports at a given depth */
struct detach_ctx {
	struct cxl_memdev *cxlmd;
	int depth;
};
|
|
|
|
|
|
|
|
static int port_has_memdev(struct device *dev, const void *data)
|
|
|
|
{
|
|
|
|
const struct detach_ctx *ctx = data;
|
|
|
|
struct cxl_port *port;
|
|
|
|
|
|
|
|
if (!is_cxl_port(dev))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
port = to_cxl_port(dev);
|
|
|
|
if (port->depth != ctx->depth)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return !!cxl_ep_load(port, ctx->cxlmd);
|
|
|
|
}
|
|
|
|
|
2022-02-04 23:08:40 +08:00
|
|
|
/*
 * devm action: walk the port hierarchy bottom-up (deepest first) and drop
 * @cxlmd's endpoint registration from each port. When the memdev was the
 * last endpoint interested in a dynamically enumerated port, mark the
 * port dead, reap its dports, and delete it.
 */
static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	/* depth 0 is the root; it is never garbage collected here */
	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct device *dev;
		struct cxl_ep *ep;
		bool died = false;

		dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
				      port_has_memdev);
		if (!dev)
			continue;
		port = to_cxl_port(dev);

		/* parent lock first: delete_switch_port() releases on parent */
		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		/* drop the reference from bus_find_device() */
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}
|
|
|
|
|
|
|
|
static resource_size_t find_component_registers(struct device *dev)
|
|
|
|
{
|
|
|
|
struct cxl_register_map map;
|
|
|
|
struct pci_dev *pdev;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Theoretically, CXL component registers can be hosted on a
|
|
|
|
* non-PCI device, in practice, only cxl_test hits this case.
|
|
|
|
*/
|
|
|
|
if (!dev_is_pci(dev))
|
|
|
|
return CXL_RESOURCE_NONE;
|
|
|
|
|
|
|
|
pdev = to_pci_dev(dev);
|
|
|
|
|
|
|
|
cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
|
2022-11-30 01:48:30 +08:00
|
|
|
return map.resource;
|
2022-02-04 23:08:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Instantiate the cxl_port for @dport_dev (if it does not already exist) and
 * attach @cxlmd as an endpoint of it.
 *
 * The caller walks the memdev's ancestry one level at a time; this helper
 * requires the *parent* port to already be registered and returns -EAGAIN to
 * ask the caller to iterate one level closer to the root first. All port
 * lookup, creation, and endpoint attachment happens under the parent port's
 * device lock so the new port cannot be reaped concurrently.
 */
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	/* find_cxl_port() takes a reference on parent_port; dropped below */
	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		/* drop the reference taken by find_cxl_port_at() */
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}
|
|
|
|
|
|
|
|
/**
 * devm_cxl_enumerate_ports - enumerate all cxl_ports in a memdev's ancestry
 * @cxlmd: endpoint memory device whose topology path to scan
 *
 * Walk from @cxlmd towards the topology root adding a cxl_port for each
 * switch level found, and attach @cxlmd as an endpoint at each level.
 * Registers cxl_detach_ep() as a devm action on @cxlmd so the endpoint is
 * detached (and idle ports garbage collected) on memdev removal.
 *
 * Returns 0 on success (including the RCH shortcut), or a negative errno if
 * a port or endpoint could not be added.
 */
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case, there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		/* ran past the topology root: everything is registered */
		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		/* find_cxl_port() takes a reference on @port when found */
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
|
|
|
|
|
2022-05-28 01:57:01 +08:00
|
|
|
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
|
|
|
|
struct cxl_dport **dport)
|
2022-02-04 23:18:31 +08:00
|
|
|
{
|
2022-05-28 01:57:01 +08:00
|
|
|
return find_cxl_port(grandparent(&cxlmd->dev), dport);
|
2022-02-04 23:18:31 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
|
|
|
|
|
2022-05-19 08:52:23 +08:00
|
|
|
/*
 * Resolve the port-ids in @target_map to dport objects and publish them in
 * @cxlsd->target[]. The whole array update is performed under the decoder's
 * target_lock seqlock so concurrent readers observe a consistent snapshot.
 * Caller must hold the port's device lock (asserted below) to keep the
 * dport xarray stable while the map is resolved.
 *
 * Returns 0 on success (or when no map was supplied), -EINVAL if the port
 * has no dports, or -ENXIO if a mapped id has no matching dport.
 */
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	/* no map is a no-op, e.g. a decoder with no targets to program */
	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}
|
|
|
|
|
2022-12-01 06:47:25 +08:00
|
|
|
struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
|
2022-06-07 04:32:01 +08:00
|
|
|
{
|
|
|
|
struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
|
|
|
|
struct cxl_decoder *cxld = &cxlsd->cxld;
|
|
|
|
int iw;
|
|
|
|
|
|
|
|
iw = cxld->interleave_ways;
|
|
|
|
if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
|
|
|
|
"misconfigured root decoder\n"))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return cxlrd->cxlsd.target[pos % iw];
|
|
|
|
}
|
2022-12-01 06:47:25 +08:00
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL);
|
2022-06-07 04:32:01 +08:00
|
|
|
|
2022-04-21 23:33:13 +08:00
|
|
|
static struct lock_class_key cxl_decoder_key;
|
|
|
|
|
2022-02-01 05:33:13 +08:00
|
|
|
/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enable some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add()
 *
 * Return: 0 on success, or a negative errno from id allocation. On
 * success the decoder's device is initialized (refcounted), so teardown
 * must go through put_device(), not kfree().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	/* decoder locks nest distinctly from other cxl_bus devices */
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	/* an empty range (end < start) means no HPA claimed yet */
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}
|
|
|
|
|
|
|
|
static int cxl_switch_decoder_init(struct cxl_port *port,
|
|
|
|
struct cxl_switch_decoder *cxlsd,
|
|
|
|
int nr_targets)
|
|
|
|
{
|
|
|
|
if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
cxlsd->nr_targets = nr_targets;
|
|
|
|
seqlock_init(&cxlsd->target_lock);
|
|
|
|
return cxl_decoder_init(port, &cxlsd->cxld);
|
2021-06-10 00:43:29 +08:00
|
|
|
}
|
|
|
|
|
2022-02-01 05:33:13 +08:00
|
|
|
/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 * @calc_hb: which host bridge covers the n'th position by granularity
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets,
						cxl_calc_hb_fn calc_hb)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	/* embedded switch decoder carries the variable-length target array */
	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		/* init failed before device_initialize(), kfree() is safe */
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = calc_hb;

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		/* device is initialized: teardown via release callback */
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cxl_switch_decoder_alloc - Allocate a switch level decoder
|
|
|
|
* @port: owning CXL switch port of this decoder
|
|
|
|
* @nr_targets: max number of dynamically addressable downstream targets
|
|
|
|
*
|
|
|
|
* Return: A new cxl decoder to be registered by cxl_decoder_add(). A
|
|
|
|
* 'switch' decoder is any decoder that can be enumerated by PCIe
|
|
|
|
* topology and the HDM Decoder Capability. This includes the decoders
|
|
|
|
* that sit between Switch Upstream Ports / Switch Downstream Ports and
|
|
|
|
* Host Bridges / Root Ports.
|
|
|
|
*/
|
2022-05-19 08:52:23 +08:00
|
|
|
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
|
|
|
|
unsigned int nr_targets)
|
2022-02-01 05:33:13 +08:00
|
|
|
{
|
2022-05-19 08:52:23 +08:00
|
|
|
struct cxl_switch_decoder *cxlsd;
|
|
|
|
struct cxl_decoder *cxld;
|
|
|
|
int rc;
|
|
|
|
|
2022-02-03 12:02:06 +08:00
|
|
|
if (is_cxl_root(port) || is_cxl_endpoint(port))
|
2022-02-01 05:33:13 +08:00
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2022-05-19 08:52:23 +08:00
|
|
|
cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
|
|
|
|
if (!cxlsd)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
|
|
|
|
if (rc) {
|
|
|
|
kfree(cxlsd);
|
|
|
|
return ERR_PTR(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
cxld = &cxlsd->cxld;
|
|
|
|
cxld->dev.type = &cxl_decoder_switch_type;
|
|
|
|
return cxlsd;
|
2022-02-01 05:33:13 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
|
|
|
|
|
2022-02-03 12:02:06 +08:00
|
|
|
/**
|
|
|
|
* cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
|
|
|
|
* @port: owning port of this decoder
|
|
|
|
*
|
|
|
|
* Return: A new cxl decoder to be registered by cxl_decoder_add()
|
|
|
|
*/
|
2022-05-22 07:24:14 +08:00
|
|
|
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
|
2022-02-03 12:02:06 +08:00
|
|
|
{
|
2022-05-22 07:24:14 +08:00
|
|
|
struct cxl_endpoint_decoder *cxled;
|
2022-05-19 08:52:23 +08:00
|
|
|
struct cxl_decoder *cxld;
|
|
|
|
int rc;
|
|
|
|
|
2022-02-03 12:02:06 +08:00
|
|
|
if (!is_cxl_endpoint(port))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2022-05-22 07:24:14 +08:00
|
|
|
cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
|
|
|
|
if (!cxled)
|
2022-05-19 08:52:23 +08:00
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2022-06-05 06:49:53 +08:00
|
|
|
cxled->pos = -1;
|
2022-05-22 07:24:14 +08:00
|
|
|
cxld = &cxled->cxld;
|
2022-05-19 08:52:23 +08:00
|
|
|
rc = cxl_decoder_init(port, cxld);
|
|
|
|
if (rc) {
|
2022-05-22 07:24:14 +08:00
|
|
|
kfree(cxled);
|
2022-05-19 08:52:23 +08:00
|
|
|
return ERR_PTR(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
cxld->dev.type = &cxl_decoder_endpoint_type;
|
2022-05-22 07:24:14 +08:00
|
|
|
return cxled;
|
2022-02-03 12:02:06 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
|
|
|
|
|
2022-02-01 05:33:13 +08:00
|
|
|
/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *          @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *         returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	/* endpoint decoders have no downstream targets to program */
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		/* only fatal for decoders that are already decoding */
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	/* name must be set before device_add() publishes the device */
	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cxl_decoder_add - Add a decoder with targets
|
2022-05-19 08:52:23 +08:00
|
|
|
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
|
2022-02-02 04:24:30 +08:00
|
|
|
* @target_map: A list of downstream ports that this decoder can direct memory
|
|
|
|
* traffic to. These numbers should correspond with the port number
|
|
|
|
* in the PCIe Link Capabilities structure.
|
|
|
|
*
|
|
|
|
* This is the unlocked variant of cxl_decoder_add_locked().
|
|
|
|
* See cxl_decoder_add_locked().
|
|
|
|
*
|
|
|
|
* Context: Process context. Takes and releases the device lock of the port that
|
|
|
|
* owns the @cxld.
|
|
|
|
*/
|
|
|
|
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
|
|
|
|
{
|
|
|
|
struct cxl_port *port;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!cxld))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(IS_ERR(cxld)))
|
|
|
|
return PTR_ERR(cxld);
|
|
|
|
|
|
|
|
port = to_cxl_port(cxld->dev.parent);
|
|
|
|
|
2022-04-21 23:33:23 +08:00
|
|
|
device_lock(&port->dev);
|
2022-02-02 04:24:30 +08:00
|
|
|
rc = cxl_decoder_add_locked(cxld, target_map);
|
2022-04-21 23:33:23 +08:00
|
|
|
device_unlock(&port->dev);
|
2022-02-02 04:24:30 +08:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
2021-11-13 08:32:58 +08:00
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
|
cxl/bus: Populate the target list at decoder create
As found by cxl_test, the implementation populated the target_list for
the single dport exceptional case, it missed populating the target_list
for the typical multi-dport case. Root decoders always know their target
list at the beginning of time, and even switch-level decoders should
have a target list of one or more zeros by default, depending on the
interleave-ways setting.
Walk the hosting port's dport list and populate based on the passed in
map.
Move devm_cxl_add_passthrough_decoder() out of line now that it does the
work of generating a target_map.
Before:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0
After:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0,1,2,3
0
0,1,2,3
Where root2 is a CXL topology root object generated by 'cxl_test'.
Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/163116439000.2460985.11713777051267946018.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-09-09 13:13:10 +08:00
|
|
|
|
2021-09-22 03:22:16 +08:00
|
|
|
/*
 * devm teardown callback for a registered decoder. For endpoint decoders,
 * tear down any associated region (cxl_decoder_kill_region()) before the
 * decoder device itself is unregistered.
 */
static void cxld_unregister(void *dev)
{
	if (is_endpoint_decoder(dev))
		cxl_decoder_kill_region(to_cxl_endpoint_decoder(dev));

	device_unregister(dev);
}
|
cxl/bus: Populate the target list at decoder create
As found by cxl_test, the implementation populated the target_list for
the single dport exceptional case, it missed populating the target_list
for the typical multi-dport case. Root decoders always know their target
list at the beginning of time, and even switch-level decoders should
have a target list of one or more zeros by default, depending on the
interleave-ways setting.
Walk the hosting port's dport list and populate based on the passed in
map.
Move devm_cxl_add_passthrough_decoder() out of line now that it does the
work of generating a target_map.
Before:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0
After:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0,1,2,3
0
0,1,2,3
Where root2 is a CXL topology root object generated by 'cxl_test'.
Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/163116439000.2460985.11713777051267946018.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-09-09 13:13:10 +08:00
|
|
|
|
2021-09-22 03:22:16 +08:00
|
|
|
/*
 * cxl_decoder_autoremove - arrange for @cxld to be unregistered
 * automatically (via cxld_unregister()) when @host is removed.
 * Returns 0 on success or a negative errno; on failure the action is
 * invoked immediately (devm_add_action_or_reset() semantics).
 */
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
|
cxl/bus: Populate the target list at decoder create
As found by cxl_test, the implementation populated the target_list for
the single dport exceptional case, it missed populating the target_list
for the typical multi-dport case. Root decoders always know their target
list at the beginning of time, and even switch-level decoders should
have a target list of one or more zeros by default, depending on the
interleave-ways setting.
Walk the hosting port's dport list and populate based on the passed in
map.
Move devm_cxl_add_passthrough_decoder() out of line now that it does the
work of generating a target_map.
Before:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0
After:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0,1,2,3
0
0,1,2,3
Where root2 is a CXL topology root object generated by 'cxl_test'.
Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/163116439000.2460985.11713777051267946018.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-09-09 13:13:10 +08:00
|
|
|
|
2021-06-16 07:18:11 +08:00
|
|
|
/**
|
|
|
|
* __cxl_driver_register - register a driver for the cxl bus
|
|
|
|
* @cxl_drv: cxl driver structure to attach
|
|
|
|
* @owner: owning module/driver
|
|
|
|
* @modname: KBUILD_MODNAME for parent driver
|
|
|
|
*/
|
|
|
|
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
|
|
|
|
const char *modname)
|
|
|
|
{
|
|
|
|
if (!cxl_drv->probe) {
|
|
|
|
pr_debug("%s ->probe() must be specified\n", modname);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!cxl_drv->name) {
|
|
|
|
pr_debug("%s ->name must be specified\n", modname);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!cxl_drv->id) {
|
|
|
|
pr_debug("%s ->id must be specified\n", modname);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
cxl_drv->drv.bus = &cxl_bus_type;
|
|
|
|
cxl_drv->drv.owner = owner;
|
|
|
|
cxl_drv->drv.mod_name = modname;
|
|
|
|
cxl_drv->drv.name = cxl_drv->name;
|
|
|
|
|
|
|
|
return driver_register(&cxl_drv->drv);
|
|
|
|
}
|
2021-11-13 08:32:58 +08:00
|
|
|
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
|
2021-06-16 07:18:11 +08:00
|
|
|
|
|
|
|
/* Undo __cxl_driver_register() */
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
|
2021-06-16 07:18:11 +08:00
|
|
|
|
|
|
|
static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
|
|
{
|
|
|
|
return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
|
|
|
|
cxl_device_id(dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cxl_bus_match(struct device *dev, struct device_driver *drv)
|
|
|
|
{
|
|
|
|
return cxl_device_id(dev) == to_cxl_drv(drv)->id;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cxl_bus_probe(struct device *dev)
|
|
|
|
{
|
2022-02-01 03:50:09 +08:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = to_cxl_drv(dev->driver)->probe(dev);
|
2022-02-02 05:07:51 +08:00
|
|
|
dev_dbg(dev, "probe: %d\n", rc);
|
2022-02-01 03:50:09 +08:00
|
|
|
return rc;
|
2021-06-16 07:18:11 +08:00
|
|
|
}
|
|
|
|
|
2021-07-14 03:35:22 +08:00
|
|
|
static void cxl_bus_remove(struct device *dev)
|
2021-06-16 07:18:11 +08:00
|
|
|
{
|
|
|
|
struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
|
|
|
|
|
|
|
|
if (cxl_drv->remove)
|
|
|
|
cxl_drv->remove(dev);
|
|
|
|
}
|
|
|
|
|
2022-02-04 23:18:31 +08:00
|
|
|
/*
 * Ordered workqueue used for cxl bus rescans, memdev detach work, and
 * drained/flushed by cxl_bus_drain() and the "flush" sysfs attribute.
 * Allocated in cxl_core_init().
 */
static struct workqueue_struct *cxl_bus_wq;
|
|
|
|
|
2022-12-02 05:33:48 +08:00
|
|
|
static void cxl_bus_rescan_queue(struct work_struct *w)
|
2022-02-04 23:18:31 +08:00
|
|
|
{
|
2022-12-02 05:33:48 +08:00
|
|
|
int rc = bus_rescan_devices(&cxl_bus_type);
|
|
|
|
|
|
|
|
pr_debug("CXL bus rescan result: %d\n", rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
void cxl_bus_rescan(void)
|
|
|
|
{
|
|
|
|
static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);
|
|
|
|
|
|
|
|
queue_work(cxl_bus_wq, &rescan_work);
|
2022-02-04 23:18:31 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
|
|
|
|
|
2022-12-02 05:33:48 +08:00
|
|
|
/* Wait for all outstanding work on the cxl workqueue to complete */
void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);
|
|
|
|
|
2022-02-04 23:18:31 +08:00
|
|
|
/*
 * Queue @cxlmd's detach_work on the cxl workqueue.  Returns true if the
 * work was queued, false if it was already pending (queue_work()
 * semantics).
 */
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
|
|
|
|
|
|
|
|
/* for user tooling to ensure port disable work has completed */
|
|
|
|
static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
if (sysfs_streq(buf, "1")) {
|
|
|
|
flush_workqueue(cxl_bus_wq);
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static BUS_ATTR_WO(flush);
|
|
|
|
|
|
|
|
/* Bus-level sysfs attributes: currently just the "flush" trigger */
static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};
|
|
|
|
|
2021-02-17 12:09:52 +08:00
|
|
|
/* The cxl bus: devices and drivers are matched by numeric id (cxl_bus_match) */
struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
|
2021-02-17 12:09:52 +08:00
|
|
|
|
2022-07-11 00:57:28 +08:00
|
|
|
/* Root of the "cxl" debugfs hierarchy, created in cxl_core_init() */
static struct dentry *cxl_debugfs;

/* Create a subdirectory named @dir under the "cxl" debugfs root */
struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
|
2022-07-11 00:57:28 +08:00
|
|
|
|
2021-05-14 13:22:00 +08:00
|
|
|
/*
 * Module init: bring up the core in dependency order -- debugfs root,
 * mbox setup, memdev support, the ordered teardown/rescan workqueue,
 * the cxl bus itself, and finally region support.  The error path
 * unwinds in reverse via the goto ladder.
 */
static __init int cxl_core_init(void)
{
	int rc;

	/* debugfs_create_dir() never needs error handling by design */
	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	/* ordered: at most one work item executes at a time */
	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}
|
|
|
|
|
2021-05-14 13:22:00 +08:00
|
|
|
/* Module exit: tear down in the reverse order of cxl_core_init() */
static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}
|
|
|
|
|
2021-05-14 13:22:00 +08:00
|
|
|
/* Module entry/exit points and license declaration */
module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");
|