mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
cxl fixes for v6.6-rc3
- Fix multiple scenarios where platform firmware defined regions fail to be assembled by the CXL core. - Fix a spurious driver-load failure on platforms that enable OS native AER, but not OS native CXL error handling. - Fix a regression detecting "poison" commands when "security" commands are also defined. - Fix a cxl_test regression with the move to centralize CXL port register enumeration in the CXL core. - Miscellaneous small fixes and cleanups -----BEGIN PGP SIGNATURE----- iHUEABYKAB0WIQSbo+XnGs+rwLz9XGXfioYZHlFsZwUCZQ9ZeAAKCRDfioYZHlFs Z2b6AQDCNGMZdvJXwXW8LY/GHzJvuIWzvzSf0/Zy050Q1s4qrQEAqmmKCXtzjtMV PQLm9o3a96Wb/zSzRZJwMVCTyXClVwg= =sSix -----END PGP SIGNATURE----- Merge tag 'cxl-fixes-6.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl Pull cxl fixes from Dan Williams: "A collection of regression fixes, bug fixes, and some small cleanups to the Compute Express Link code. The regressions arrived in the v6.5 dev cycle and missed the v6.6 merge window due to my personal absences this cycle. The most important fixes are for scenarios where the CXL subsystem fails to parse valid region configurations established by platform firmware. This is important because agreement between OS and BIOS on the CXL configuration is fundamental to implementing "OS native" error handling, i.e. address translation and component failure identification. Other important fixes are a driver load error when the BIOS lets the Linux PCI core handle AER events, but not CXL memory errors. The other fixes might have end user impact, but for now are only known to trigger in our test/emulation environment. Summary: - Fix multiple scenarios where platform firmware defined regions fail to be assembled by the CXL core. - Fix a spurious driver-load failure on platforms that enable OS native AER, but not OS native CXL error handling. - Fix a regression detecting "poison" commands when "security" commands are also defined. 
- Fix a cxl_test regression with the move to centralize CXL port register enumeration in the CXL core. - Miscellaneous small fixes and cleanups" * tag 'cxl-fixes-6.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: cxl/acpi: Annotate struct cxl_cxims_data with __counted_by cxl/port: Fix cxl_test register enumeration regression cxl/region: Refactor granularity select in cxl_port_setup_targets() cxl/region: Match auto-discovered region decoders by HPA range cxl/mbox: Fix CEL logic for poison and security commands cxl/pci: Replace host_bridge->native_aer with pcie_aer_is_native() PCI/AER: Export pcie_aer_is_native() cxl/pci: Fix appropriate checking for _OSC while handling CXL RAS registers
This commit is contained in:
commit
2ad78f8cee
@ -14,7 +14,7 @@
|
||||
|
||||
struct cxl_cxims_data {
|
||||
int nr_maps;
|
||||
u64 xormaps[];
|
||||
u64 xormaps[] __counted_by(nr_maps);
|
||||
};
|
||||
|
||||
/*
|
||||
@ -112,9 +112,9 @@ static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
|
||||
GFP_KERNEL);
|
||||
if (!cximsd)
|
||||
return -ENOMEM;
|
||||
cximsd->nr_maps = nr_maps;
|
||||
memcpy(cximsd->xormaps, cxims->xormap_list,
|
||||
nr_maps * sizeof(*cximsd->xormaps));
|
||||
cximsd->nr_maps = nr_maps;
|
||||
cxlrd->platform_data = cximsd;
|
||||
|
||||
return 0;
|
||||
|
@ -715,24 +715,25 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
|
||||
for (i = 0; i < cel_entries; i++) {
|
||||
u16 opcode = le16_to_cpu(cel_entry[i].opcode);
|
||||
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
|
||||
int enabled = 0;
|
||||
|
||||
if (!cmd && (!cxl_is_poison_command(opcode) ||
|
||||
!cxl_is_security_command(opcode))) {
|
||||
dev_dbg(dev,
|
||||
"Opcode 0x%04x unsupported by driver\n", opcode);
|
||||
continue;
|
||||
if (cmd) {
|
||||
set_bit(cmd->info.id, mds->enabled_cmds);
|
||||
enabled++;
|
||||
}
|
||||
|
||||
if (cmd)
|
||||
set_bit(cmd->info.id, mds->enabled_cmds);
|
||||
|
||||
if (cxl_is_poison_command(opcode))
|
||||
if (cxl_is_poison_command(opcode)) {
|
||||
cxl_set_poison_cmd_enabled(&mds->poison, opcode);
|
||||
enabled++;
|
||||
}
|
||||
|
||||
if (cxl_is_security_command(opcode))
|
||||
if (cxl_is_security_command(opcode)) {
|
||||
cxl_set_security_cmd_enabled(&mds->security, opcode);
|
||||
enabled++;
|
||||
}
|
||||
|
||||
dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
|
||||
dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
|
||||
enabled ? "enabled" : "unsupported by driver");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/memregion.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/debugfs.h>
|
||||
@ -706,16 +707,20 @@ static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
|
||||
return cxl_setup_regs(map);
|
||||
}
|
||||
|
||||
static inline int cxl_port_setup_regs(struct cxl_port *port,
|
||||
resource_size_t component_reg_phys)
|
||||
static int cxl_port_setup_regs(struct cxl_port *port,
|
||||
resource_size_t component_reg_phys)
|
||||
{
|
||||
if (dev_is_platform(port->uport_dev))
|
||||
return 0;
|
||||
return cxl_setup_comp_regs(&port->dev, &port->comp_map,
|
||||
component_reg_phys);
|
||||
}
|
||||
|
||||
static inline int cxl_dport_setup_regs(struct cxl_dport *dport,
|
||||
resource_size_t component_reg_phys)
|
||||
static int cxl_dport_setup_regs(struct cxl_dport *dport,
|
||||
resource_size_t component_reg_phys)
|
||||
{
|
||||
if (dev_is_platform(dport->dport_dev))
|
||||
return 0;
|
||||
return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
|
||||
component_reg_phys);
|
||||
}
|
||||
|
@ -717,13 +717,35 @@ static int match_free_decoder(struct device *dev, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int match_auto_decoder(struct device *dev, void *data)
|
||||
{
|
||||
struct cxl_region_params *p = data;
|
||||
struct cxl_decoder *cxld;
|
||||
struct range *r;
|
||||
|
||||
if (!is_switch_decoder(dev))
|
||||
return 0;
|
||||
|
||||
cxld = to_cxl_decoder(dev);
|
||||
r = &cxld->hpa_range;
|
||||
|
||||
if (p->res && p->res->start == r->start && p->res->end == r->end)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
|
||||
struct cxl_region *cxlr)
|
||||
{
|
||||
struct device *dev;
|
||||
int id = 0;
|
||||
|
||||
dev = device_find_child(&port->dev, &id, match_free_decoder);
|
||||
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
|
||||
dev = device_find_child(&port->dev, &cxlr->params,
|
||||
match_auto_decoder);
|
||||
else
|
||||
dev = device_find_child(&port->dev, &id, match_free_decoder);
|
||||
if (!dev)
|
||||
return NULL;
|
||||
/*
|
||||
@ -1154,16 +1176,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
|
||||
}
|
||||
|
||||
/*
|
||||
* If @parent_port is masking address bits, pick the next unused address
|
||||
* bit to route @port's targets.
|
||||
* Interleave granularity is a multiple of @parent_port granularity.
|
||||
* Multiplier is the parent port interleave ways.
|
||||
*/
|
||||
if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
|
||||
u32 address_bit = max(peig + peiw, eiw + peig);
|
||||
|
||||
eig = address_bit - eiw + 1;
|
||||
} else {
|
||||
eiw = peiw;
|
||||
eig = peig;
|
||||
rc = granularity_to_eig(parent_ig * parent_iw, &eig);
|
||||
if (rc) {
|
||||
dev_dbg(&cxlr->dev,
|
||||
"%s: invalid granularity calculation (%d * %d)\n",
|
||||
dev_name(&parent_port->dev), parent_ig, parent_iw);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = eig_to_granularity(eig, &ig);
|
||||
|
@ -529,7 +529,6 @@ static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
|
||||
|
||||
static int cxl_pci_ras_unmask(struct pci_dev *pdev)
|
||||
{
|
||||
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
|
||||
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
|
||||
void __iomem *addr;
|
||||
u32 orig_val, val, mask;
|
||||
@ -541,9 +540,9 @@ static int cxl_pci_ras_unmask(struct pci_dev *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* BIOS has CXL error control */
|
||||
if (!host_bridge->native_cxl_error)
|
||||
return -ENXIO;
|
||||
/* BIOS has PCIe AER error control */
|
||||
if (!pcie_aer_is_native(pdev))
|
||||
return 0;
|
||||
|
||||
rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
|
||||
if (rc)
|
||||
|
@ -229,6 +229,7 @@ int pcie_aer_is_native(struct pci_dev *dev)
|
||||
|
||||
return pcie_ports_native || host->native_aer;
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, CXL);
|
||||
|
||||
static int pci_enable_pcie_error_reporting(struct pci_dev *dev)
|
||||
{
|
||||
|
@ -29,10 +29,8 @@ extern bool pcie_ports_dpc_native;
|
||||
|
||||
#ifdef CONFIG_PCIEAER
|
||||
int pcie_aer_init(void);
|
||||
int pcie_aer_is_native(struct pci_dev *dev);
|
||||
#else
|
||||
static inline int pcie_aer_init(void) { return 0; }
|
||||
static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_PCI_PCIE
|
||||
|
@ -42,11 +42,13 @@ struct aer_capability_regs {
|
||||
|
||||
#if defined(CONFIG_PCIEAER)
|
||||
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
|
||||
int pcie_aer_is_native(struct pci_dev *dev);
|
||||
#else
|
||||
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
|
||||
#endif
|
||||
|
||||
void cper_print_aer(struct pci_dev *dev, int aer_severity,
|
||||
|
Loading…
Reference in New Issue
Block a user