/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef __CXL_H__
#define __CXL_H__

#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>

/**
 * DOC: cxl objects
 *
 * The CXL core objects like ports, decoders, and regions are shared
 * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers
 * (port-driver, region-driver, nvdimm object-drivers... etc).
 */

/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
#define CXL_CM_OFFSET 0x1000
#define CXL_CM_CAP_HDR_OFFSET 0x0
#define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0)
#define CM_CAP_HDR_CAP_ID 1
#define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16)
#define CM_CAP_HDR_CAP_VERSION 1
#define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20)
#define CM_CAP_HDR_CACHE_MEM_VERSION 1
#define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
#define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)

#define CXL_CM_CAP_CAP_ID_HDM 0x5
#define CXL_CM_CAP_CAP_HDM_VERSION 1

/* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
#define CXL_HDM_DECODER_CAP_OFFSET 0x0
#define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0)
#define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
#define CXL_HDM_DECODER0_BASE_LOW_OFFSET 0x10
#define CXL_HDM_DECODER0_BASE_HIGH_OFFSET 0x14
#define CXL_HDM_DECODER0_SIZE_LOW_OFFSET 0x18
#define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET 0x1c
#define CXL_HDM_DECODER0_CTRL_OFFSET 0x20

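/*
 * Per CXL 2.0 8.2.5.12.1, a zero decoder-count field encodes a single
 * decoder, and non-zero values encode 2 * val decoders (1 -> 2, 2 -> 4,
 * ... 5 -> 10); the helper below reflects that encoding.
 */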
static inline int cxl_hdm_decoder_count(u32 cap_hdr)
{
	int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr);

	return val ? val * 2 : 1;
}

/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
#define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
#define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)
/* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
#define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)
/* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
#define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
#define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
#define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
#define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000

/* CXL 2.0 8.2.8.4 Mailbox Registers */
#define CXLDEV_MBOX_CAPS_OFFSET 0x00
#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
#define CXLDEV_MBOX_CTRL_OFFSET 0x04
#define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
#define CXLDEV_MBOX_CMD_OFFSET 0x08
#define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
#define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
#define CXLDEV_MBOX_STATUS_OFFSET 0x10
#define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20

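/*
 * Mailbox doorbell handshake, sketched from the offsets above (the
 * real sequence lives in the memory-device driver; 'base' is an
 * assumed pointer to the mapped mailbox register block):
 *
 *	writeq(cmd, base + CXLDEV_MBOX_CMD_OFFSET);
 *	writel(CXLDEV_MBOX_CTRL_DOORBELL, base + CXLDEV_MBOX_CTRL_OFFSET);
 *	while (readl(base + CXLDEV_MBOX_CTRL_OFFSET) &
 *	       CXLDEV_MBOX_CTRL_DOORBELL)
 *		cpu_relax();
 */
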
/*
 * Using struct_group() allows for per register-block-type helper routines,
 * without requiring block-type agnostic code to include the prefix.
 */
struct cxl_regs {
	/*
	 * Common set of CXL Component register block base pointers
	 * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
	 */
	struct_group_tagged(cxl_component_regs, component,
		void __iomem *hdm_decoder;
	);

	/*
	 * Common set of CXL Device register block base pointers
	 * @status: CXL 2.0 8.2.8.3 Device Status Registers
	 * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
	 * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
	 */
	struct_group_tagged(cxl_device_regs, device_regs,
		void __iomem *status, *mbox, *memdev;
	);
};

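/*
 * With the tagged groups above, setup code can take the typed view
 * while I/O paths use the anonymous members. A sketch of the pattern
 * ('cxlm' stands in for a driver's memdev state object):
 *
 *	cxl_map_device_regs(pdev, &cxlm->regs.device_regs, &map);
 *	...
 *	readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
 */
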
struct cxl_reg_map {
	bool valid;
	unsigned long offset;
	unsigned long size;
};

struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
};

struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};

/**
 * struct cxl_register_map - DVSEC harvested register block mapping parameters
 * @base: virtual base of the register-block-BAR + @block_offset
 * @block_offset: offset to start of register block in @barno
 * @reg_type: see enum cxl_regloc_type
 * @barno: PCI BAR number containing the register block
 * @component_map: cxl_reg_map for component registers
 * @device_map: cxl_reg_maps for device registers
 */
struct cxl_register_map {
	void __iomem *base;
	u64 block_offset;
	u8 reg_type;
	u8 barno;
	union {
		struct cxl_component_reg_map component_map;
		struct cxl_device_reg_map device_map;
	};
};

void cxl_probe_component_regs(struct device *dev, void __iomem *base,
			      struct cxl_component_reg_map *map);
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
			   struct cxl_device_reg_map *map);
int cxl_map_component_regs(struct pci_dev *pdev,
			   struct cxl_component_regs *regs,
			   struct cxl_register_map *map);
int cxl_map_device_regs(struct pci_dev *pdev,
			struct cxl_device_regs *regs,
			struct cxl_register_map *map);

enum cxl_regloc_type;
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
		      struct cxl_register_map *map);

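/*
 * Typical enumeration flow, as a sketch (error handling elided;
 * CXL_REGLOC_RBI_MEMDEV is assumed to be one of the enum
 * cxl_regloc_type values defined in the PCI-specific header):
 *
 *	struct cxl_register_map map;
 *
 *	if (cxl_find_regblock(pdev, CXL_REGLOC_RBI_MEMDEV, &map) == 0)
 *		... hand 'map' to cxl_map_device_regs() ...
 */
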
#define CXL_RESOURCE_NONE ((resource_size_t) -1)
#define CXL_TARGET_STRLEN 20

/*
 * cxl_decoder flags that define the type of memory / devices this
 * decoder supports as well as configuration lock status. See "CXL 2.0
 * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
 */
#define CXL_DECODER_F_RAM   BIT(0)
#define CXL_DECODER_F_PMEM  BIT(1)
#define CXL_DECODER_F_TYPE2 BIT(2)
#define CXL_DECODER_F_TYPE3 BIT(3)
#define CXL_DECODER_F_LOCK  BIT(4)
#define CXL_DECODER_F_MASK  GENMASK(4, 0)

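/*
 * Example flag test, as a sketch ('cxld' is an assumed pointer to the
 * struct cxl_decoder defined below): a decoder that may host persistent
 * memory and is not locked by platform firmware:
 *
 *	if ((cxld->flags & (CXL_DECODER_F_PMEM | CXL_DECODER_F_LOCK)) ==
 *	    CXL_DECODER_F_PMEM)
 *		...
 */
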
enum cxl_decoder_type {
	CXL_DECODER_ACCELERATOR = 2,
	CXL_DECODER_EXPANDER = 3,
};

/*
 * The current specification goes up to 8; double that seems a
 * reasonable software maximum for the foreseeable future.
 */
#define CXL_DECODER_MAX_INTERLEAVE 16

/**
 * struct cxl_decoder - CXL address range decode configuration
 * @dev: this decoder's device
 * @id: kernel device name id
 * @platform_res: address space resources considered by root decoder
 * @decoder_range: address space resources considered by midlevel decoder
 * @interleave_ways: number of cxl_dports in this decode
 * @interleave_granularity: data stride per dport
 * @target_type: accelerator vs expander (type2 vs type3) selector
 * @flags: memory type capabilities and locking
 * @target_lock: coordinate coherent reads of the target list
 * @nr_targets: number of elements in @target
 * @target: active ordered target list in current decoder configuration
 */
struct cxl_decoder {
	struct device dev;
	int id;
	union {
		struct resource platform_res;
		struct range decoder_range;
	};
	int interleave_ways;
	int interleave_granularity;
	enum cxl_decoder_type target_type;
	unsigned long flags;
	seqlock_t target_lock;
	int nr_targets;
	struct cxl_dport *target[];
};

/**
 * enum cxl_nvdimm_brige_state - state machine for managing bus rescans
 * @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
 * @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
 * @CXL_NVB_ONLINE: Target state after successful ->probe()
 * @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe()
 */
enum cxl_nvdimm_brige_state {
	CXL_NVB_NEW,
	CXL_NVB_DEAD,
	CXL_NVB_ONLINE,
	CXL_NVB_OFFLINE,
};

struct cxl_nvdimm_bridge {
	int id;
	struct device dev;
	struct cxl_port *port;
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
	struct work_struct state_work;
	enum cxl_nvdimm_brige_state state;
};

struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;
	struct nvdimm *nvdimm;
};

/**
 * struct cxl_port - logical collection of upstream port devices and
 *		     downstream port devices to construct a CXL memory
 *		     decode hierarchy.
 * @dev: this port's device
 * @uport: PCI or platform device implementing the upstream port capability
 * @id: id for port device-name
 * @dports: cxl_dport instances referenced by decoders
 * @decoder_ida: allocator for decoder ids
 * @component_reg_phys: component register capability base address (optional)
 * @depth: How deep this port is relative to the root. depth 0 is the root.
 */
struct cxl_port {
	struct device dev;
	struct device *uport;
	int id;
	struct list_head dports;
	struct ida decoder_ida;
	resource_size_t component_reg_phys;
	unsigned int depth;
};

/**
 * struct cxl_dport - CXL downstream port
 * @dport: PCI bridge or firmware device representing the downstream link
 * @port_id: unique hardware identifier for dport in decoder target list
 * @component_reg_phys: downstream port component registers
 * @port: reference to cxl_port that contains this downstream port
 * @list: node for a cxl_port's list of cxl_dport instances
 */
struct cxl_dport {
	struct device *dport;
	int port_id;
	resource_size_t component_reg_phys;
	struct cxl_port *port;
	struct list_head list;
};

/*
 * The platform firmware device hosting the root is also the top of the
 * CXL port topology. All other CXL ports have another CXL port as their
 * parent and their ->uport / host device is out-of-line of the port
 * ancestry.
 */
static inline bool is_cxl_root(struct cxl_port *port)
{
	return port->uport == port->dev.parent;
}

bool is_cxl_port(struct device *dev);
struct cxl_port *to_cxl_port(struct device *dev);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus);
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_port *parent_port);
struct cxl_port *find_cxl_root(struct device *dev);
struct cxl_dport *devm_cxl_add_dport(struct device *host, struct cxl_port *port,
				     struct device *dport, int port_id,
				     resource_size_t component_reg_phys);

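/*
 * Topology construction sketch (hypothetical 'host', 'uport' and
 * 'bridge' devices; a root port is added with a NULL parent, and
 * CXL_RESOURCE_NONE stands in when no component registers are found):
 *
 *	port = devm_cxl_add_port(host, uport, component_reg_phys, NULL);
 *	dport = devm_cxl_add_dport(host, port, bridge, port_id,
 *				   CXL_RESOURCE_NONE);
 */
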
struct cxl_decoder *to_cxl_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
bool is_cxl_decoder(struct device *dev);
struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
					   unsigned int nr_targets);
struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
					     unsigned int nr_targets);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);

extern struct bus_type cxl_bus_type;

struct cxl_driver {
	const char *name;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	struct device_driver drv;
	int id;
};

static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
{
	return container_of(drv, struct cxl_driver, drv);
}

int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname);
#define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME)
void cxl_driver_unregister(struct cxl_driver *cxl_drv);

#define module_cxl_driver(__cxl_driver) \
	module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister)

#define CXL_DEVICE_NVDIMM_BRIDGE 1
#define CXL_DEVICE_NVDIMM 2

#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"

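/*
 * Driver registration sketch (hypothetical driver name, callbacks and
 * module; @id selects which CXL_DEVICE_* type the driver binds to):
 *
 *	static struct cxl_driver my_nvdimm_driver = {
 *		.name = "my_cxl_nvdimm",
 *		.probe = my_nvdimm_probe,
 *		.id = CXL_DEVICE_NVDIMM,
 *	};
 *	module_cxl_driver(my_nvdimm_driver);
 *	MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
 */
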
struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);

/*
 * Unit test builds override this to __weak; find the 'strong' version
 * of these symbols in tools/testing/cxl/.
 */
#ifndef __mock
#define __mock static
#endif

#ifdef CONFIG_PROVE_CXL_LOCKING
enum cxl_lock_class {
	CXL_ANON_LOCK,
	CXL_NVDIMM_LOCK,
	CXL_NVDIMM_BRIDGE_LOCK,
	CXL_PORT_LOCK,
	/*
	 * Be careful when adding new lock classes here: CXL_PORT_LOCK is
	 * extended by the port depth, so a maximum CXL port topology
	 * depth would need to be defined first.
	 */
};

static inline void cxl_nested_lock(struct device *dev)
{
	if (is_cxl_port(dev)) {
		struct cxl_port *port = to_cxl_port(dev);

		mutex_lock_nested(&dev->lockdep_mutex,
				  CXL_PORT_LOCK + port->depth);
	} else if (is_cxl_decoder(dev)) {
		struct cxl_port *port = to_cxl_port(dev->parent);

		/*
		 * A decoder is the immediate child of a port, so set
		 * its lock class equal to other child device siblings.
		 */
		mutex_lock_nested(&dev->lockdep_mutex,
				  CXL_PORT_LOCK + port->depth + 1);
	} else if (is_cxl_nvdimm_bridge(dev))
		mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_BRIDGE_LOCK);
	else if (is_cxl_nvdimm(dev))
		mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_LOCK);
	else
		mutex_lock_nested(&dev->lockdep_mutex, CXL_ANON_LOCK);
}

static inline void cxl_nested_unlock(struct device *dev)
{
	mutex_unlock(&dev->lockdep_mutex);
}

static inline void cxl_device_lock(struct device *dev)
{
	/*
	 * For double lock errors the lockup will happen before lockdep
	 * warns at cxl_nested_lock(), so assert explicitly.
	 */
	lockdep_assert_not_held(&dev->lockdep_mutex);

	device_lock(dev);
	cxl_nested_lock(dev);
}

static inline void cxl_device_unlock(struct device *dev)
{
	cxl_nested_unlock(dev);
	device_unlock(dev);
}
#else
static inline void cxl_nested_lock(struct device *dev)
{
}

static inline void cxl_nested_unlock(struct device *dev)
{
}

static inline void cxl_device_lock(struct device *dev)
{
	device_lock(dev);
}

static inline void cxl_device_unlock(struct device *dev)
{
	device_unlock(dev);
}
#endif

#endif /* __CXL_H__ */