Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-13 14:24:11 +08:00)
cxl fixes for v6.12-rc6
- Fix crashes when running with cxl-test code
- Fix Trace DRAM Event Record field decodes
- Fix module/built-in initialization order errors
- Fix use after free on decoder shutdowns
- Fix out of order decoder allocations
- Improve cxl-test to better reflect real world systems

-----BEGIN PGP SIGNATURE-----
iIoEABYKADIWIQSgX9xt+GwmrJEQ+euebuN7TNx1MQUCZyUaZhQcaXJhLndlaW55
QGludGVsLmNvbQAKCRCebuN7TNx1MRHmAP40P0fHjUxtA3FxxPiJLOouUUEfMzyr
z2hstFw6gh1CNgEA4a4cOyDSe41ptqGFRIHfPeay7l8MdYtFV3iiYeDPnAA=
=awb7
-----END PGP SIGNATURE-----

Merge tag 'cxl-fixes-6.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl fixes from Ira Weiny:
"The bulk of these fixes center around an initialization order bug reported by
 Gregory Price and some additional fallout from the debugging effort.

 In summary, cxl_acpi and cxl_mem race, and this previously worked, because of a
 bus_rescan_devices(), when testing with the drivers built as modules.
 Unfortunately, with the drivers built in, the rescan would fail because the
 cxl_port driver is registered late due to link order. Furthermore,
 bus_rescan_devices() was found not to guarantee the probe barrier that CXL was
 expecting. Additional fixes to cxl-test and decoder allocation came along as
 they were found in this debugging effort.

 The other fixes are pretty minor, but one affects trace point data seen by
 user space.

 Summary:
  - Fix crashes when running with cxl-test code
  - Fix Trace DRAM Event Record field decodes
  - Fix module/built-in initialization order errors
  - Fix use after free on decoder shutdowns
  - Fix out of order decoder allocations
  - Improve cxl-test to better reflect real world systems"

* tag 'cxl-fixes-6.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
  cxl/test: Improve init-order fidelity relative to real-world systems
  cxl/port: Prevent out-of-order decoder allocation
  cxl/port: Fix use-after-free, permit out-of-order decoder shutdown
  cxl/acpi: Ensure ports ready at cxl_acpi_probe() return
  cxl/port: Fix cxl_bus_rescan() vs bus_rescan_devices()
  cxl/port: Fix CXL port initialization order when the subsystem is built-in
  cxl/events: Fix Trace DRAM Event Record
  cxl/core: Return error when cxl_endpoint_gather_bandwidth() handles a non-PCI device
This commit is contained in: commit b1966a1fd2
@@ -4037,6 +4037,41 @@ int device_for_each_child_reverse(struct device *parent, void *data,
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);

/**
 * device_for_each_child_reverse_from - device child iterator in reversed order.
 * @parent: parent struct device.
 * @from: optional starting point in child list
 * @fn: function to be called for each device.
 * @data: data for the callback.
 *
 * Iterate over @parent's child devices, starting at @from, and call @fn
 * for each, passing it @data. This helper is identical to
 * device_for_each_child_reverse() when @from is NULL.
 *
 * @fn is checked each iteration. If it returns anything other than 0,
 * iteration stops and that value is returned to the caller of
 * device_for_each_child_reverse_from().
 */
int device_for_each_child_reverse_from(struct device *parent,
                                       struct device *from, const void *data,
                                       int (*fn)(struct device *, const void *))
{
        struct klist_iter i;
        struct device *child;
        int error = 0;

        if (!parent->p)
                return 0;

        klist_iter_init_node(&parent->p->klist_children, &i,
                             (from ? &from->p->knode_parent : NULL));
        while ((child = prev_device(&i)) && !error)
                error = fn(child, data);
        klist_iter_exit(&i);
        return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);

/**
 * device_find_child - device iterator for locating a particular device.
 * @parent: parent struct device
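For context, a hedged sketch of how a caller might use the new iterator: walk a parent's children in reverse registration order starting from a known child, and stop as soon as the callback returns non-zero. The callback and the wrapper below are hypothetical, not part of the patch; only device_for_each_child_reverse_from() itself is.

#include <linux/device.h>

/* Hypothetical callback: stop at the first child with no bound driver. */
static int older_sibling_cb(struct device *dev, const void *data)
{
        /* a non-zero return stops the walk and is propagated to the caller */
        if (!dev->driver)
                return -ENXIO;
        return 0;
}

/* Iterate in reverse from @child toward the oldest child of @parent. */
static int check_older_siblings(struct device *parent, struct device *child)
{
        return device_for_each_child_reverse_from(parent, child, NULL,
                                                  older_sibling_cb);
}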
@@ -60,6 +60,7 @@ config CXL_ACPI
        default CXL_BUS
        select ACPI_TABLE_LIB
        select ACPI_HMAT
        select CXL_PORT
        help
          Enable support for host managed device memory (HDM) resources
          published by a platform's ACPI CXL memory layout description. See
@@ -1,13 +1,21 @@
# SPDX-License-Identifier: GPL-2.0

# Order is important here for the built-in case:
# - 'core' first for fundamental init
# - 'port' before platform root drivers like 'acpi' so that CXL-root ports
#   are immediately enabled
# - 'mem' and 'pmem' before endpoint drivers so that memdevs are
#   immediately enabled
# - 'pci' last, also mirrors the hardware enumeration hierarchy
obj-y += core/
obj-$(CONFIG_CXL_PCI) += cxl_pci.o
obj-$(CONFIG_CXL_MEM) += cxl_mem.o
obj-$(CONFIG_CXL_PORT) += cxl_port.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
obj-$(CONFIG_CXL_PORT) += cxl_port.o
obj-$(CONFIG_CXL_MEM) += cxl_mem.o
obj-$(CONFIG_CXL_PCI) += cxl_pci.o

cxl_mem-y := mem.o
cxl_pci-y := pci.o
cxl_port-y := port.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o security.o
cxl_port-y := port.o
cxl_mem-y := mem.o
cxl_pci-y := pci.o
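The comment block in the hunk above matters because, for built-in (=y) objects, initcalls registered at the same level run in link order, and link order follows the obj-y listing order. A minimal sketch of the effect with two hypothetical drivers a.o and b.o (not the CXL code; condensed into one listing for brevity): if the Makefile lists a.o before b.o, a_init() runs before b_init().

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

/* a.c -- listed first in "obj-y += a.o b.o", so this initcall runs first */
static int __init a_init(void)
{
        pr_info("a_init\n");
        return 0;
}
subsys_initcall(a_init);

/* b.c -- same initcall level, later in link order, so this runs second */
static int __init b_init(void)
{
        pr_info("b_init\n");
        return 0;
}
subsys_initcall(b_init);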
@@ -924,6 +924,13 @@ static void __exit cxl_acpi_exit(void)

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
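MODULE_SOFTDEP() covers the modular build the way the Makefile link order covers the built-in build: it is metadata that userspace modprobe reads via modinfo and uses to load the listed modules before ("pre:") or after ("post:") this one; the kernel itself does not enforce the ordering. A hedged sketch with a hypothetical consumer module (illustration only, the provider name is made up):

#include <linux/init.h>
#include <linux/module.h>

static int __init consumer_init(void)
{
        return 0;
}
subsys_initcall(consumer_init);

static void __exit consumer_exit(void)
{
}
module_exit(consumer_exit);

/* Ask modprobe to insert the (hypothetical) provider module first */
MODULE_SOFTDEP("pre: example_provider");
MODULE_LICENSE("GPL");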
@@ -641,6 +641,9 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
        void *ptr;
        int rc;

        if (!dev_is_pci(cxlds->dev))
                return -ENODEV;

        if (cxlds->rcd)
                return -ENODEV;

@@ -712,7 +712,44 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
        return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
static int commit_reap(struct device *dev, const void *data)
{
        struct cxl_port *port = to_cxl_port(dev->parent);
        struct cxl_decoder *cxld;

        if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);
        if (port->commit_end == cxld->id &&
            ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
                port->commit_end--;
                dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
                        dev_name(&cxld->dev), port->commit_end);
        }

        return 0;
}

void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);

        lockdep_assert_held_write(&cxl_region_rwsem);

        /*
         * Once the highest committed decoder is disabled, free any other
         * decoders that were pinned allocated by out-of-order release.
         */
        port->commit_end--;
        dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
                port->commit_end);
        device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
                                           commit_reap);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, CXL);

static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
@@ -721,14 +758,14 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
        u32 ctrl;

        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return 0;
                return;

        if (port->commit_end != id) {
        if (port->commit_end == id)
                cxl_port_commit_reap(cxld);
        else
                dev_dbg(&port->dev,
                        "%s: out of order reset, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end);
                return -EBUSY;
        }

        down_read(&cxl_dpa_rwsem);
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -741,7 +778,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end--;
        cxld->flags &= ~CXL_DECODER_F_ENABLE;

        /* Userspace is now responsible for reconfiguring this decoder */
@@ -751,8 +787,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
                cxled = to_cxl_endpoint_decoder(&cxld->dev);
                cxled->state = CXL_DECODER_STATE_MANUAL;
        }

        return 0;
}

static int cxl_setup_hdm_decoder_from_dvsec(
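The reap logic above revolves around one invariant: port->commit_end holds the id of the highest committed decoder, HDM decoders are committed in ascending id order, and they are normally reset in descending order. A hedged sketch of that invariant as hypothetical helpers (illustration only, not functions in the driver; assumes the struct cxl_port and struct cxl_decoder definitions from cxl.h shown later in this diff):

/* Illustration only: the ordering rules the code above maintains. */
static bool example_can_commit(struct cxl_port *port, struct cxl_decoder *cxld)
{
        /* the next decoder allowed to commit is always commit_end + 1 */
        return cxld->id == port->commit_end + 1;
}

static bool example_in_order_reset(struct cxl_port *port, struct cxl_decoder *cxld)
{
        /*
         * An in-order reset targets the highest committed decoder; anything
         * else is out of order and is reaped later by cxl_port_commit_reap()
         * once the highest committed decoder is finally disabled.
         */
        return cxld->id == port->commit_end;
}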
@@ -2084,11 +2084,18 @@ static void cxl_bus_remove(struct device *dev)

static struct workqueue_struct *cxl_bus_wq;

static int cxl_rescan_attach(struct device *dev, void *data)
{
        int rc = device_attach(dev);

        dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached");

        return 0;
}

static void cxl_bus_rescan_queue(struct work_struct *w)
{
        int rc = bus_rescan_devices(&cxl_bus_type);

        pr_debug("CXL bus rescan result: %d\n", rc);
        bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach);
}

void cxl_bus_rescan(void)
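The rework above stops relying on bus_rescan_devices() alone and instead runs an explicit device_attach() pass over every device on the bus, with the rescan itself executed from a workqueue. A hedged sketch of how a caller can then obtain a real probe barrier, assuming the existing cxl_bus_wq (illustration only; in the driver both symbols live in the same file, so this is not necessarily the driver's exact helper):

#include <linux/workqueue.h>

/*
 * Queueing the rescan makes it asynchronous; flushing the same workqueue
 * afterwards waits for every queued rescan, and for the device_attach()
 * probes it performs, to finish before returning.
 */
static void example_rescan_and_wait(void)
{
        cxl_bus_rescan();               /* queues cxl_bus_rescan_queue() */
        flush_workqueue(cxl_bus_wq);    /* probe barrier */
}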
@@ -232,8 +232,8 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
                                "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
                        return 0;
                } else {
                        dev_err(&cxlr->dev,
                                "Failed to synchronize CPU cache state\n");
                        dev_WARN(&cxlr->dev,
                                 "Failed to synchronize CPU cache state\n");
                        return -ENXIO;
                }
        }
@@ -242,19 +242,17 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
        return 0;
}

static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
        struct cxl_region_params *p = &cxlr->params;
        int i, rc = 0;
        int i;

        /*
         * Before region teardown attempt to flush, and if the flush
         * fails cancel the region teardown for data consistency
         * concerns
         * Before region teardown attempt to flush, evict any data cached for
         * this region, or scream loudly about missing arch / platform support
         * for CXL teardown.
         */
        rc = cxl_region_invalidate_memregion(cxlr);
        if (rc)
                return rc;
        cxl_region_invalidate_memregion(cxlr);

        for (i = count - 1; i >= 0; i--) {
                struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -277,23 +275,17 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
                        cxl_rr = cxl_rr_load(iter, cxlr);
                        cxld = cxl_rr->decoder;
                        if (cxld->reset)
                                rc = cxld->reset(cxld);
                        if (rc)
                                return rc;
                                cxld->reset(cxld);
                        set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
                }

endpoint_reset:
                rc = cxled->cxld.reset(&cxled->cxld);
                if (rc)
                        return rc;
                cxled->cxld.reset(&cxled->cxld);
                set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
        }

        /* all decoders associated with this region have been torn down */
        clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

        return 0;
}

static int commit_decoder(struct cxl_decoder *cxld)
@@ -409,16 +401,8 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
         * still pending.
         */
        if (p->state == CXL_CONFIG_RESET_PENDING) {
                rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
                /*
                 * Revert to committed since there may still be active
                 * decoders associated with this region, or move forward
                 * to active to mark the reset successful
                 */
                if (rc)
                        p->state = CXL_CONFIG_COMMIT;
                else
                        p->state = CXL_CONFIG_ACTIVE;
                cxl_region_decode_reset(cxlr, p->interleave_ways);
                p->state = CXL_CONFIG_ACTIVE;
        }
        }

@@ -794,26 +778,50 @@ out:
        return rc;
}

static int check_commit_order(struct device *dev, const void *data)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        /*
         * if port->commit_end is not the only free decoder, then out of
         * order shutdown has occurred, block further allocations until
         * that is resolved
         */
        if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0))
                return -EBUSY;
        return 0;
}

static int match_free_decoder(struct device *dev, void *data)
{
        struct cxl_port *port = to_cxl_port(dev->parent);
        struct cxl_decoder *cxld;
        int *id = data;
        int rc;

        if (!is_switch_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);

        /* enforce ordered allocation */
        if (cxld->id != *id)
        if (cxld->id != port->commit_end + 1)
                return 0;

        if (!cxld->region)
                return 1;
        if (cxld->region) {
                dev_dbg(dev->parent,
                        "next decoder to commit (%s) is already reserved (%s)\n",
                        dev_name(dev), dev_name(&cxld->region->dev));
                return 0;
        }

        (*id)++;

        return 0;
        rc = device_for_each_child_reverse_from(dev->parent, dev, NULL,
                                                check_commit_order);
        if (rc) {
                dev_dbg(dev->parent,
                        "unable to allocate %s due to out of order shutdown\n",
                        dev_name(dev));
                return 0;
        }
        return 1;
}

static int match_auto_decoder(struct device *dev, void *data)
@@ -840,7 +848,6 @@ cxl_region_find_decoder(struct cxl_port *port,
                        struct cxl_region *cxlr)
{
        struct device *dev;
        int id = 0;

        if (port == cxled_to_port(cxled))
                return &cxled->cxld;
@@ -849,7 +856,7 @@ cxl_region_find_decoder(struct cxl_port *port,
                dev = device_find_child(&port->dev, &cxlr->params,
                                        match_auto_decoder);
        else
                dev = device_find_child(&port->dev, &id, match_free_decoder);
                dev = device_find_child(&port->dev, NULL, match_free_decoder);
        if (!dev)
                return NULL;
        /*
@@ -2054,13 +2061,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
        get_device(&cxlr->dev);

        if (p->state > CXL_CONFIG_ACTIVE) {
                /*
                 * TODO: tear down all impacted regions if a device is
                 * removed out of order
                 */
                rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
                if (rc)
                        goto out;
                cxl_region_decode_reset(cxlr, p->interleave_ways);
                p->state = CXL_CONFIG_ACTIVE;
        }

@@ -279,7 +279,7 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
#define show_mem_event_type(type) __print_symbolic(type, \
#define show_gmer_mem_event_type(type) __print_symbolic(type, \
        { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
        { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
        { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
@@ -373,7 +373,7 @@ TRACE_EVENT(cxl_general_media,
                "hpa=%llx region=%s region_uuid=%pUb",
                __entry->dpa, show_dpa_flags(__entry->dpa_flags),
                show_event_desc_flags(__entry->descriptor),
                show_mem_event_type(__entry->type),
                show_gmer_mem_event_type(__entry->type),
                show_trans_type(__entry->transaction_type),
                __entry->channel, __entry->rank, __entry->device,
                __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
@@ -391,6 +391,17 @@ TRACE_EVENT(cxl_general_media,
 * DRAM Event Record defines many fields the same as the General Media Event
 * Record. Reuse those definitions as appropriate.
 */
#define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01
#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02
#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03
#define show_dram_mem_event_type(type) __print_symbolic(type, \
        { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
        { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
        { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
        { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
)

#define CXL_DER_VALID_CHANNEL BIT(0)
#define CXL_DER_VALID_RANK BIT(1)
#define CXL_DER_VALID_NIBBLE BIT(2)
@@ -477,7 +488,7 @@ TRACE_EVENT(cxl_dram,
                "hpa=%llx region=%s region_uuid=%pUb",
                __entry->dpa, show_dpa_flags(__entry->dpa_flags),
                show_event_desc_flags(__entry->descriptor),
                show_mem_event_type(__entry->type),
                show_dram_mem_event_type(__entry->type),
                show_trans_type(__entry->transaction_type),
                __entry->channel, __entry->rank, __entry->nibble_mask,
                __entry->bank_group, __entry->bank,
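The reason the DRAM record needed its own table is visible in the raw values above: the two records agree only on 0x00, so decoding a DRAM Event Record with the General Media table mislabels every non-zero type. A small comparison derived from the definitions above (illustration only, written as a C comment in the style of the header):

/*
 * Raw type   General Media decode     DRAM Event Record decode
 * --------   --------------------     ------------------------
 * 0x00       "ECC Error"              "ECC Error"
 * 0x01       "Invalid Address"        "Scrub Media ECC Error"
 * 0x02       "Data Path Error"        "Invalid Address"
 * 0x03       (no entry above)         "Data Path Error"
 *
 * Hence cxl_dram must use show_dram_mem_event_type() rather than the
 * General Media show_gmer_mem_event_type() table.
 */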
@@ -359,7 +359,7 @@ struct cxl_decoder {
        struct cxl_region *region;
        unsigned long flags;
        int (*commit)(struct cxl_decoder *cxld);
        int (*reset)(struct cxl_decoder *cxld);
        void (*reset)(struct cxl_decoder *cxld);
};

/*
@@ -730,6 +730,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
                              struct pci_bus *bus);
@@ -208,7 +208,22 @@ static struct cxl_driver cxl_port_driver = {
        },
};

module_cxl_driver(cxl_port_driver);
static int __init cxl_port_init(void)
{
        return cxl_driver_register(&cxl_port_driver);
}
/*
 * Be ready to immediately enable ports emitted by the platform CXL root
 * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
 */
subsys_initcall(cxl_port_init);

static void __exit cxl_port_exit(void)
{
        cxl_driver_unregister(&cxl_port_driver);
}
module_exit(cxl_port_exit);

MODULE_DESCRIPTION("CXL: Port enumeration and services");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
@@ -1078,6 +1078,9 @@ int device_for_each_child(struct device *dev, void *data,
                          int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse(struct device *dev, void *data,
                                  int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse_from(struct device *parent,
                                       struct device *from, const void *data,
                                       int (*fn)(struct device *, const void *));
struct device *device_find_child(struct device *dev, void *data,
                                 int (*match)(struct device *dev, void *data));
struct device *device_find_child_by_name(struct device *parent,
@@ -693,26 +693,22 @@ static int mock_decoder_commit(struct cxl_decoder *cxld)
        return 0;
}

static int mock_decoder_reset(struct cxl_decoder *cxld)
static void mock_decoder_reset(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        int id = cxld->id;

        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return 0;
                return;

        dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
        if (port->commit_end != id) {
        if (port->commit_end == id)
                cxl_port_commit_reap(cxld);
        else
                dev_dbg(&port->dev,
                        "%s: out of order reset, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end);
                return -EBUSY;
        }

        port->commit_end--;
        cxld->flags &= ~CXL_DECODER_F_ENABLE;

        return 0;
}

static void default_mock_decoder(struct cxl_decoder *cxld)
@@ -1062,7 +1058,7 @@ static void mock_companion(struct acpi_device *adev, struct device *dev)
#define SZ_64G (SZ_32G * 2)
#endif

static __init int cxl_rch_init(void)
static __init int cxl_rch_topo_init(void)
{
        int rc, i;

@@ -1090,30 +1086,8 @@ static __init int cxl_rch_init(void)
                goto err_bridge;
        }

        for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
                int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
                struct platform_device *rch = cxl_rch[i];
                struct platform_device *pdev;

                pdev = platform_device_alloc("cxl_rcd", idx);
                if (!pdev)
                        goto err_mem;
                pdev->dev.parent = &rch->dev;
                set_dev_node(&pdev->dev, i % 2);

                rc = platform_device_add(pdev);
                if (rc) {
                        platform_device_put(pdev);
                        goto err_mem;
                }
                cxl_rcd[i] = pdev;
        }

        return 0;

err_mem:
        for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
                platform_device_unregister(cxl_rcd[i]);
err_bridge:
        for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
                struct platform_device *pdev = cxl_rch[i];
@@ -1127,12 +1101,10 @@ err_bridge:
        return rc;
}

static void cxl_rch_exit(void)
static void cxl_rch_topo_exit(void)
{
        int i;

        for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
                platform_device_unregister(cxl_rcd[i]);
        for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
                struct platform_device *pdev = cxl_rch[i];

@@ -1143,7 +1115,7 @@ static void cxl_rch_exit(void)
        }
}

static __init int cxl_single_init(void)
static __init int cxl_single_topo_init(void)
{
        int i, rc;

@@ -1228,29 +1200,8 @@ static __init int cxl_single_init(void)
                cxl_swd_single[i] = pdev;
        }

        for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
                struct platform_device *dport = cxl_swd_single[i];
                struct platform_device *pdev;

                pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
                if (!pdev)
                        goto err_mem;
                pdev->dev.parent = &dport->dev;
                set_dev_node(&pdev->dev, i % 2);

                rc = platform_device_add(pdev);
                if (rc) {
                        platform_device_put(pdev);
                        goto err_mem;
                }
                cxl_mem_single[i] = pdev;
        }

        return 0;

err_mem:
        for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem_single[i]);
err_dport:
        for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
                platform_device_unregister(cxl_swd_single[i]);
@@ -1273,12 +1224,10 @@ err_bridge:
        return rc;
}

static void cxl_single_exit(void)
static void cxl_single_topo_exit(void)
{
        int i;

        for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem_single[i]);
        for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
                platform_device_unregister(cxl_swd_single[i]);
        for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
@@ -1295,6 +1244,91 @@ static void cxl_single_exit(void)
        }
}

static void cxl_mem_exit(void)
{
        int i;

        for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
                platform_device_unregister(cxl_rcd[i]);
        for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem_single[i]);
        for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem[i]);
}

static int cxl_mem_init(void)
{
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
                struct platform_device *dport = cxl_switch_dport[i];
                struct platform_device *pdev;

                pdev = platform_device_alloc("cxl_mem", i);
                if (!pdev)
                        goto err_mem;
                pdev->dev.parent = &dport->dev;
                set_dev_node(&pdev->dev, i % 2);

                rc = platform_device_add(pdev);
                if (rc) {
                        platform_device_put(pdev);
                        goto err_mem;
                }
                cxl_mem[i] = pdev;
        }

        for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
                struct platform_device *dport = cxl_swd_single[i];
                struct platform_device *pdev;

                pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
                if (!pdev)
                        goto err_single;
                pdev->dev.parent = &dport->dev;
                set_dev_node(&pdev->dev, i % 2);

                rc = platform_device_add(pdev);
                if (rc) {
                        platform_device_put(pdev);
                        goto err_single;
                }
                cxl_mem_single[i] = pdev;
        }

        for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
                int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
                struct platform_device *rch = cxl_rch[i];
                struct platform_device *pdev;

                pdev = platform_device_alloc("cxl_rcd", idx);
                if (!pdev)
                        goto err_rcd;
                pdev->dev.parent = &rch->dev;
                set_dev_node(&pdev->dev, i % 2);

                rc = platform_device_add(pdev);
                if (rc) {
                        platform_device_put(pdev);
                        goto err_rcd;
                }
                cxl_rcd[i] = pdev;
        }

        return 0;

err_rcd:
        for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
                platform_device_unregister(cxl_rcd[i]);
err_single:
        for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem_single[i]);
err_mem:
        for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem[i]);
        return rc;
}

static __init int cxl_test_init(void)
{
        int rc, i;
@@ -1407,29 +1441,11 @@ static __init int cxl_test_init(void)
                cxl_switch_dport[i] = pdev;
        }

        for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
                struct platform_device *dport = cxl_switch_dport[i];
                struct platform_device *pdev;

                pdev = platform_device_alloc("cxl_mem", i);
                if (!pdev)
                        goto err_mem;
                pdev->dev.parent = &dport->dev;
                set_dev_node(&pdev->dev, i % 2);

                rc = platform_device_add(pdev);
                if (rc) {
                        platform_device_put(pdev);
                        goto err_mem;
                }
                cxl_mem[i] = pdev;
        }

        rc = cxl_single_init();
        rc = cxl_single_topo_init();
        if (rc)
                goto err_mem;
                goto err_dport;

        rc = cxl_rch_init();
        rc = cxl_rch_topo_init();
        if (rc)
                goto err_single;

@@ -1442,19 +1458,20 @@ static __init int cxl_test_init(void)

        rc = platform_device_add(cxl_acpi);
        if (rc)
                goto err_add;
                goto err_root;

        rc = cxl_mem_init();
        if (rc)
                goto err_root;

        return 0;

err_add:
err_root:
        platform_device_put(cxl_acpi);
err_rch:
        cxl_rch_exit();
        cxl_rch_topo_exit();
err_single:
        cxl_single_exit();
err_mem:
        for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem[i]);
        cxl_single_topo_exit();
err_dport:
        for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
                platform_device_unregister(cxl_switch_dport[i]);
@@ -1486,11 +1503,10 @@ static __exit void cxl_test_exit(void)
{
        int i;

        cxl_mem_exit();
        platform_device_unregister(cxl_acpi);
        cxl_rch_exit();
        cxl_single_exit();
        for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
                platform_device_unregister(cxl_mem[i]);
        cxl_rch_topo_exit();
        cxl_single_topo_exit();
        for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
                platform_device_unregister(cxl_switch_dport[i]);
        for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
@@ -1673,6 +1673,7 @@ static struct platform_driver cxl_mock_mem_driver = {
                .name = KBUILD_MODNAME,
                .dev_groups = cxl_mock_mem_groups,
                .groups = cxl_mock_mem_core_groups,
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};