// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

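/*
 * Typical flow for a port driver (a sketch, not a verbatim caller from
 * this file): map the decoder registers, then enumerate a decoder
 * object per register instance.
 *
 *	struct cxl_hdm *cxlhdm = devm_cxl_setup_hdm(port);
 *
 *	if (!IS_ERR(cxlhdm))
 *		rc = devm_cxl_enumerate_decoders(cxlhdm);
 */
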
/*
 * Register @cxld with the CXL core. On failure the device reference is
 * dropped here, so the caller must not touch @cxld after a non-zero
 * return.
 */
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport
 * sees are claimed and passed to the single dport. Disable the range until
 * the first CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_decoder *cxld;
	struct cxl_dport *dport;
	int single_port_map[1];

	cxld = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

	device_lock_assert(&port->dev);

	dport = list_first_entry(&port->dports, typeof(*dport), list);
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

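/*
 * Sketch of the expected fallback in a port driver (assumed caller, not
 * shown in this file): when no HDM decoder capability is published, a
 * single-dport port can substitute a passthrough decoder.
 *
 *	cxlhdm = devm_cxl_setup_hdm(port);
 *	if (IS_ERR(cxlhdm))
 *		rc = devm_cxl_add_passthrough_decoder(port);
 */
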
/*
 * Cache the decoder count, target count, and supported interleave
 * address bits advertised by the HDM Decoder Capability register. Note
 * that cxl_hdm_decoder_count() translates the encoded count field into
 * an instance count (the CXL 2.0 encoding covers 1 to 10 decoders).
 */
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
					  void __iomem *crb)
{
	struct cxl_component_reg_map map;

	cxl_probe_component_regs(&port->dev, crb, &map);
	if (!map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return IOMEM_ERR_PTR(-ENXIO);
	}

	return crb + map.hdm_decoder.offset;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	void __iomem *crb, *hdm;
	struct cxl_hdm *cxlhdm;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	crb = devm_cxl_iomap_block(dev, port->component_reg_phys,
				   CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	hdm = map_hdm_decoder_regs(port, crb);
	if (IS_ERR(hdm))
		return ERR_CAST(hdm);
	cxlhdm->regs.hdm_decoder = hdm;

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

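/*
 * Decoder control register encodings (per CXL 2.0, stated here as a
 * reader aid): interleave granularity is a power-of-2 multiple of 256
 * bytes (0 -> 256B, 1 -> 512B, ...), and interleave ways is 1, 2, 4, 8,
 * or 16 for encodings 0-4 and 3, 6, or 12 for encodings 8-10. Worked
 * example: a ctrl value with IG=2 and IW=1 decodes to a 1KB granularity
 * across 2 targets.
 */
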
static int to_interleave_granularity(u32 ctrl)
{
	int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl);

	return 256 << val;
}

static int to_interleave_ways(u32 ctrl)
{
	int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl);

	switch (val) {
	case 0 ... 4:
		return 1 << val;
	case 8 ... 10:
		return 3 << (val - 8);
	default:
		return 0;
	}
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which)
{
	u64 size, base;
	u32 ctrl;
	int i;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));

	if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* switch decoders are always enabled if committed */
	if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
	}
	cxld->interleave_ways = to_interleave_ways(ctrl);
	if (!cxld->interleave_ways) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return -ENXIO;
	}
	cxld->interleave_granularity = to_interleave_granularity(ctrl);

	if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
		cxld->target_type = CXL_DECODER_EXPANDER;
	else
		cxld->target_type = CXL_DECODER_ACCELERATOR;

	/* endpoint decoders have no target list to walk */
	if (is_endpoint_decoder(&cxld->dev))
		return 0;

	/* byte i of the target list register is the port id of way i */
	target_list.value =
		ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
	for (i = 0; i < cxld->interleave_ways; i++)
		target_map[i] = target_list.target_id[i];

	return 0;
}

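/*
 * Worked example of the decode above (illustrative values only): a
 * committed switch decoder with base 0x0 and size 256MB yields an
 * hpa_range of [0x0, 0xfffffff], and a target list register reading
 * 0x0100 routes way 0 to dport id 0 and way 1 to dport id 1 on a
 * little-endian host.
 */
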
/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: HDM capability context established by devm_cxl_setup_hdm()
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i, committed, failed;
	u32 ctrl;

	/*
	 * Since the register resource was recently claimed via
	 * request_region(), be careful about trusting the "not-committed"
	 * status until the commit timeout has elapsed. The commit timeout
	 * is 10ms (CXL 2.0 8.2.5.12.20), but double it to be tolerant of
	 * any clock skew between host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);

	for (i = 0, failed = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port))
			cxld = cxl_endpoint_decoder_alloc(port);
		else
			cxld = cxl_switch_decoder_alloc(port, target_count);
		if (IS_ERR(cxld)) {
			dev_warn(&port->dev,
				 "Failed to allocate the decoder\n");
			return PTR_ERR(cxld);
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i);
		if (rc) {
			put_device(&cxld->dev);
			failed++;
			continue;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder to port\n");
			return rc;
		}
	}

	if (failed == cxlhdm->decoder_count) {
		dev_err(&port->dev, "No valid decoders found\n");
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);