
Merge tag 'thunderbolt-for-v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.10 merge window

This includes the following Thunderbolt/USB4 changes for the v5.10 merge window:

  * A couple of optimizations around Tiger Lake force power logic and
    NHI (Native Host Interface) LC (Link Controller) mailbox command
    processing

  * Power management improvements for Software Connection Manager

  * Debugfs support

  * Allow KUnit tests to be enabled also when the Thunderbolt driver is
    configured as a module.

  * A few minor cleanups and fixes

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (37 commits)
  thunderbolt: Capitalize comment on top of QUIRK_FORCE_POWER_LINK_CONTROLLER
  thunderbolt: Correct tb_check_quirks() kernel-doc
  thunderbolt: Log correct zeroX entries in decode_error()
  thunderbolt: Handle ERR_LOCK notification
  thunderbolt: Use "if USB4" instead of "depends on" in Kconfig
  thunderbolt: Allow KUnit tests to be built also when CONFIG_USB4=m
  thunderbolt: Only stop control channel when entering freeze
  thunderbolt: debugfs: Fix uninitialized return in counters_write()
  thunderbolt: Add debugfs interface
  thunderbolt: No need to warn in TB_CFG_ERROR_INVALID_CONFIG_SPACE
  thunderbolt: Introduce tb_switch_is_tiger_lake()
  thunderbolt: Introduce tb_switch_is_ice_lake()
  thunderbolt: Check for Intel vendor ID when identifying controller
  thunderbolt: Introduce tb_port_is_nhi()
  thunderbolt: Introduce tb_switch_next_cap()
  thunderbolt: Introduce tb_port_next_cap()
  thunderbolt: Move struct tb_cap_any to tb_regs.h
  thunderbolt: Add runtime PM for Software CM
  thunderbolt: Create device links from ACPI description
  ACPI: Export acpi_get_first_physical_node() to modules
  ...
Committed by Greg Kroah-Hartman on 2020-09-30 09:42:36 +02:00 (commit 59ee364baf).
21 changed files with 1982 additions and 302 deletions.

drivers/acpi/bus.c

@@ -551,6 +551,7 @@ struct device *acpi_get_first_physical_node(struct acpi_device *adev)
 	mutex_unlock(physical_node_lock);
 	return phys_dev;
 }
+EXPORT_SYMBOL_GPL(acpi_get_first_physical_node);
 
 static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
 						      const struct device *dev)

drivers/pci/quirks.c

@@ -3673,63 +3673,6 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
 			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
 			       quirk_apple_poweroff_thunderbolt);
-
-/*
- * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels
- *
- * During suspend the Thunderbolt controller is reset and all PCI
- * tunnels are lost. The NHI driver will try to reestablish all tunnels
- * during resume. We have to manually wait for the NHI since there is
- * no parent child relationship between the NHI and the tunneled
- * bridges.
- */
-static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
-{
-	struct pci_dev *sibling = NULL;
-	struct pci_dev *nhi = NULL;
-
-	if (!x86_apple_machine)
-		return;
-	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
-		return;
-
-	/*
-	 * Find the NHI and confirm that we are a bridge on the Thunderbolt
-	 * host controller and not on a Thunderbolt endpoint.
-	 */
-	sibling = pci_get_slot(dev->bus, 0x0);
-	if (sibling == dev)
-		goto out; /* we are the downstream bridge to the NHI */
-	if (!sibling || !sibling->subordinate)
-		goto out;
-	nhi = pci_get_slot(sibling->subordinate, 0x0);
-	if (!nhi)
-		goto out;
-	if (nhi->vendor != PCI_VENDOR_ID_INTEL
-		    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
-			nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
-			nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
-			nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
-		    || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
-		goto out;
-	pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
-	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
-out:
-	pci_dev_put(nhi);
-	pci_dev_put(sibling);
-}
-
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
-			       PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
-			       quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
-			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
-			       quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
-			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
-			       quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
-			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
-			       quirk_apple_wait_for_thunderbolt);
 #endif
 
 /*

drivers/thunderbolt/Kconfig

@@ -16,7 +16,19 @@ menuconfig USB4
 	  To compile this driver a module, choose M here. The module will be
 	  called thunderbolt.
 
+if USB4
+
+config USB4_DEBUGFS_WRITE
+	bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
+	help
+	  Enables writing to device configuration registers through
+	  debugfs interface.
+
+	  Only enable this if you know what you are doing! Never enable
+	  this for production systems or distro kernels.
+
 config USB4_KUNIT_TEST
 	bool "KUnit tests"
 	depends on KUNIT=y
-	depends on USB4=y
+
+endif # USB4

drivers/thunderbolt/Makefile

@@ -4,4 +4,6 @@ thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o ee
 thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
 thunderbolt-objs += nvm.o retimer.o quirks.o
-obj-${CONFIG_USB4_KUNIT_TEST} += test.o
+thunderbolt-${CONFIG_ACPI} += acpi.o
+thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
+thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o

drivers/thunderbolt/acpi.c (new file, 117 lines)

@@ -0,0 +1,117 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ACPI support
*
* Copyright (C) 2020, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/acpi.h>
#include "tb.h"
static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
void **return_value)
{
struct fwnode_reference_args args;
struct fwnode_handle *fwnode;
struct tb_nhi *nhi = data;
struct acpi_device *adev;
struct pci_dev *pdev;
struct device *dev;
int ret;
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
fwnode = acpi_fwnode_handle(adev);
ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface",
NULL, 0, 0, &args);
if (ret)
return AE_OK;
/* It needs to reference this NHI */
if (nhi->pdev->dev.fwnode != args.fwnode)
goto out_put;
/*
* Try to find the physical device by walking upwards in the hierarchy.
* We need to do this because the xHCI driver might not yet be
* bound so the USB3 SuperSpeed ports are not yet created.
*/
dev = acpi_get_first_physical_node(adev);
while (!dev) {
adev = adev->parent;
if (!adev)
break;
dev = acpi_get_first_physical_node(adev);
}
if (!dev)
goto out_put;
/*
* Check that the device is PCIe. This is because USB3
* SuperSpeed ports have this property and they are not power
* managed with the xHCI and the SuperSpeed hub so we create the
* link from xHCI instead.
*/
while (!dev_is_pci(dev))
dev = dev->parent;
if (!dev)
goto out_put;
/*
* Check that this actually matches the type of device we
* expect. It should either be xHCI or PCIe root/downstream
* port.
*/
pdev = to_pci_dev(dev);
if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
(pci_is_pcie(pdev) &&
(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
const struct device_link *link;
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
DL_FLAG_AUTOREMOVE_SUPPLIER |
DL_FLAG_PM_RUNTIME);
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
dev_name(&pdev->dev));
} else {
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
}
}
out_put:
fwnode_handle_put(args.fwnode);
return AE_OK;
}
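/*
 * Note that tb_acpi_add_link() returns AE_OK on every path, so
 * acpi_walk_namespace() keeps iterating over the whole namespace even
 * when a given device does not match or a link cannot be created.
 */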
/**
* tb_acpi_add_links() - Add device links based on ACPI description
* @nhi: Pointer to NHI
*
* Goes over the ACPI namespace finding tunneled ports that reference
* the @nhi ACPI node. For each reference a device link is added. The link
* is automatically removed by the driver core.
*/
void tb_acpi_add_links(struct tb_nhi *nhi)
{
acpi_status status;
if (!has_acpi_companion(&nhi->pdev->dev))
return;
/*
* Find all devices that have a usb4-host-interface property
* that references this NHI.
*/
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32,
tb_acpi_add_link, NULL, nhi, NULL);
if (ACPI_FAILURE(status))
dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
}

drivers/thunderbolt/cap.c

@@ -15,14 +15,6 @@
 #define VSE_CAP_OFFSET_MAX	0xffff
 #define TMU_ACCESS_EN		BIT(20)
 
-struct tb_cap_any {
-	union {
-		struct tb_cap_basic basic;
-		struct tb_cap_extended_short extended_short;
-		struct tb_cap_extended_long extended_long;
-	};
-} __packed;
-
 static int tb_port_enable_tmu(struct tb_port *port, bool enable)
 {
 	struct tb_switch *sw = port->sw;
@@ -67,23 +59,50 @@ static void tb_port_dummy_read(struct tb_port *port)
 	}
 }
 
+/**
+ * tb_port_next_cap() - Return next capability in the linked list
+ * @port: Port to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds dword offset of the next capability in port config space
+ * capability list and returns it. Passing %0 returns the first entry in
+ * the capability list. If no next capability is found returns %0. In case
+ * of failure returns negative errno.
+ */
+int tb_port_next_cap(struct tb_port *port, unsigned int offset)
+{
+	struct tb_cap_any header;
+	int ret;
+
+	if (!offset)
+		return port->config.first_cap_offset;
+
+	ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+	if (ret)
+		return ret;
+
+	return header.basic.next;
+}
+
 static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
 {
-	u32 offset = 1;
+	int offset = 0;
 
 	do {
 		struct tb_cap_any header;
 		int ret;
 
+		offset = tb_port_next_cap(port, offset);
+		if (offset < 0)
+			return offset;
+
 		ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
 		if (ret)
 			return ret;
 
 		if (header.basic.cap == cap)
 			return offset;
-
-		offset = header.basic.next;
-	} while (offset);
+	} while (offset > 0);
 
 	return -ENOENT;
 }
@@ -113,6 +132,50 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
 	return ret;
 }
 
+/**
+ * tb_switch_next_cap() - Return next capability in the linked list
+ * @sw: Switch to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds dword offset of the next capability in router config space
+ * capability list and returns it. Passing %0 returns the first entry in
+ * the capability list. If no next capability is found returns %0. In case
+ * of failure returns negative errno.
+ */
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
+{
+	struct tb_cap_any header;
+	int ret;
+
+	if (!offset)
+		return sw->config.first_cap_offset;
+
+	ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+	if (ret)
+		return ret;
+
+	switch (header.basic.cap) {
+	case TB_SWITCH_CAP_TMU:
+		ret = header.basic.next;
+		break;
+
+	case TB_SWITCH_CAP_VSE:
+		if (!header.extended_short.length)
+			ret = header.extended_long.next;
+		else
+			ret = header.extended_short.next;
+		break;
+
+	default:
+		tb_sw_dbg(sw, "unknown capability %#x at %#x\n",
+			  header.basic.cap, offset);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret;
+}
+
 /**
  * tb_switch_find_cap() - Find switch capability
  * @sw Switch to find the capability for
@@ -124,21 +187,23 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
  */
 int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
 {
-	int offset = sw->config.first_cap_offset;
+	int offset = 0;
 
-	while (offset > 0 && offset < CAP_OFFSET_MAX) {
+	do {
 		struct tb_cap_any header;
 		int ret;
 
+		offset = tb_switch_next_cap(sw, offset);
+		if (offset < 0)
+			return offset;
+
 		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
 		if (ret)
 			return ret;
 
 		if (header.basic.cap == cap)
 			return offset;
-
-		offset = header.basic.next;
-	}
+	} while (offset);
 
 	return -ENOENT;
 }
@@ -155,37 +220,24 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
  */
 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
 {
-	struct tb_cap_any header;
-	int offset;
+	int offset = 0;
 
-	offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE);
-	if (offset < 0)
-		return offset;
-
-	while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) {
+	do {
+		struct tb_cap_any header;
 		int ret;
 
-		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+		offset = tb_switch_next_cap(sw, offset);
+		if (offset < 0)
+			return offset;
+
+		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
 		if (ret)
 			return ret;
 
-		/*
-		 * Extended vendor specific capabilities come in two
-		 * flavors: short and long. The latter is used when
-		 * offset is over 0xff.
-		 */
-		if (offset >= CAP_OFFSET_MAX) {
-			if (header.extended_long.vsec_id == vsec)
-				return offset;
-			offset = header.extended_long.next;
-		} else {
-			if (header.extended_short.vsec_id == vsec)
-				return offset;
-			if (!header.extended_short.length)
-				return -ENOENT;
-			offset = header.extended_short.next;
-		}
-	}
+		if (header.extended_short.cap == TB_SWITCH_CAP_VSE &&
+		    header.extended_short.vsec_id == vsec)
+			return offset;
+	} while (offset);
 
 	return -ENOENT;
 }

drivers/thunderbolt/ctl.c

@@ -219,6 +219,7 @@ static int check_config_address(struct tb_cfg_address addr,
 static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
 {
 	struct cfg_error_pkg *pkg = response->buffer;
+	struct tb_ctl *ctl = response->ctl;
 	struct tb_cfg_result res = { 0 };
 
 	res.response_route = tb_cfg_get_route(&pkg->header);
 	res.response_port = 0;
@@ -227,9 +228,13 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
 	if (res.err)
 		return res;
 
-	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
-	WARN(pkg->zero2, "pkg->zero1 is %#x\n", pkg->zero1);
-	WARN(pkg->zero3, "pkg->zero1 is %#x\n", pkg->zero1);
+	if (pkg->zero1)
+		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
+	if (pkg->zero2)
+		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
+	if (pkg->zero3)
+		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
+
 	res.err = 1;
 	res.tb_error = pkg->error;
 	res.response_port = pkg->port;
@@ -266,9 +271,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
 		 * Invalid cfg_space/offset/length combination in
 		 * cfg_read/cfg_write.
 		 */
-		tb_ctl_WARN(ctl,
-			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
-			res->response_route, res->response_port);
+		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
+			   res->response_route, res->response_port);
 		return;
 	case TB_CFG_ERROR_NO_SUCH_PORT:
 		/*
@@ -283,6 +287,10 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
 		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
 			res->response_route, res->response_port);
 		return;
+	case TB_CFG_ERROR_LOCK:
+		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
+			    res->response_route, res->response_port);
+		return;
 	default:
 		/* 5,6,7,9 and 11 are also valid error codes */
 		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
@@ -951,6 +959,9 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
 		return -ENODEV;
 
 	tb_cfg_print_error(ctl, res);
+
+	if (res->tb_error == TB_CFG_ERROR_LOCK)
+		return -EACCES;
+
 	return -EIO;
 }

drivers/thunderbolt/debugfs.c (new file)

@@ -0,0 +1,701 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Debugfs interface
*
* Copyright (C) 2020, Intel Corporation
* Authors: Gil Fine <gil.fine@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include "tb.h"
#define PORT_CAP_PCIE_LEN 1
#define PORT_CAP_POWER_LEN 2
#define PORT_CAP_LANE_LEN 3
#define PORT_CAP_USB3_LEN 5
#define PORT_CAP_DP_LEN 8
#define PORT_CAP_TMU_LEN 8
#define PORT_CAP_BASIC_LEN 9
#define PORT_CAP_USB4_LEN 20
#define SWITCH_CAP_TMU_LEN 26
#define SWITCH_CAP_BASIC_LEN 27
#define PATH_LEN 2
#define COUNTER_SET_LEN 3
#define DEBUGFS_ATTR(__space, __write) \
static int __space ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __space ## _show, inode->i_private); \
} \
\
static const struct file_operations __space ## _fops = { \
.owner = THIS_MODULE, \
.open = __space ## _open, \
.release = single_release, \
.read = seq_read, \
.write = __write, \
.llseek = seq_lseek, \
}
#define DEBUGFS_ATTR_RO(__space) \
DEBUGFS_ATTR(__space, NULL)
#define DEBUGFS_ATTR_RW(__space) \
DEBUGFS_ATTR(__space, __space ## _write)
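/*
 * For example, DEBUGFS_ATTR_RO(path) below expands to a path_open()
 * helper and a read-only path_fops that wire path_show() into seq_file.
 */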
static struct dentry *tb_debugfs_root;
static void *validate_and_copy_from_user(const void __user *user_buf,
size_t *count)
{
size_t nbytes;
void *buf;
if (!*count)
return ERR_PTR(-EINVAL);
if (!access_ok(user_buf, *count))
return ERR_PTR(-EFAULT);
buf = (void *)get_zeroed_page(GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
nbytes = min_t(size_t, *count, PAGE_SIZE);
if (copy_from_user(buf, user_buf, nbytes)) {
free_page((unsigned long)buf);
return ERR_PTR(-EFAULT);
}
*count = nbytes;
return buf;
}
static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
int long_fmt_len)
{
char *token;
u32 v[5];
int ret;
token = strsep(line, "\n");
if (!token)
return false;
/*
* For Adapter/Router configuration space:
* Short format is: offset value\n
* v[0] v[1]
* Long format as produced from the read side:
* offset relative_offset cap_id vs_cap_id value\n
* v[0] v[1] v[2] v[3] v[4]
*
* For Counter configuration space:
* Short format is: offset\n
* v[0]
* Long format as produced from the read side:
* offset relative_offset counter_id value\n
* v[0] v[1] v[2] v[3]
*/
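/*
 * For example, in the short Adapter/Router format the line
 * "0x5 0xdeadbeef" writes value 0xdeadbeef to dword offset 0x5.
 */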
ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
/* In case of Counters, clear counter, "val" content is NA */
if (ret == short_fmt_len) {
*offs = v[0];
*val = v[short_fmt_len - 1];
return true;
} else if (ret == long_fmt_len) {
*offs = v[0];
*val = v[long_fmt_len - 1];
return true;
}
return false;
}
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct tb *tb = sw->tb;
char *line, *buf;
u32 val, offset;
int ret = 0;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
}
/* User did hardware changes behind the driver's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
line = buf;
while (parse_line(&line, &offset, &val, 2, 5)) {
if (port)
ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
else
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
if (ret)
break;
}
mutex_unlock(&tb->lock);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
return regs_write(port->sw, port, user_buf, count, ppos);
}
static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_switch *sw = s->private;
return regs_write(sw, NULL, user_buf, count, ppos);
}
#define DEBUGFS_MODE 0600
#else
#define port_regs_write NULL
#define switch_regs_write NULL
#define DEBUGFS_MODE 0400
#endif
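/*
 * Without CONFIG_USB4_DEBUGFS_WRITE the write handlers above are
 * compiled out; the NULL write stubs and 0400 mode keep the debugfs
 * files read-only.
 */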
static int port_clear_all_counters(struct tb_port *port)
{
u32 *buf;
int ret;
buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
COUNTER_SET_LEN * port->config.max_counters);
kfree(buf);
return ret;
}
static ssize_t counters_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = port->sw->tb;
char *buf;
int ret;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
}
/* If written delimiter only, clear all counters in one shot */
if (buf[0] == '\n') {
ret = port_clear_all_counters(port);
} else {
char *line = buf;
u32 val, offset;
ret = -EINVAL;
while (parse_line(&line, &offset, &val, 1, 4)) {
ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
offset, 1);
if (ret)
break;
}
}
mutex_unlock(&tb->lock);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
static void cap_show(struct seq_file *s, struct tb_switch *sw,
struct tb_port *port, unsigned int cap, u8 cap_id,
u8 vsec_id, int length)
{
int ret, offset = 0;
while (length > 0) {
int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
u32 data[TB_MAX_CONFIG_RW_LENGTH];
if (port)
ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
dwords);
else
ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n",
cap + offset);
if (dwords > 1)
seq_printf(s, "0x%04x ...\n", cap + offset + 1);
return;
}
for (i = 0; i < dwords; i++) {
seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
cap + offset + i, offset + i,
cap_id, vsec_id, data[i]);
}
length -= dwords;
offset += dwords;
}
}
static void port_cap_show(struct tb_port *port, struct seq_file *s,
unsigned int cap)
{
struct tb_cap_any header;
u8 vsec_id = 0;
size_t length;
int ret;
ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n", cap);
return;
}
switch (header.basic.cap) {
case TB_PORT_CAP_PHY:
length = PORT_CAP_LANE_LEN;
break;
case TB_PORT_CAP_TIME1:
length = PORT_CAP_TMU_LEN;
break;
case TB_PORT_CAP_POWER:
length = PORT_CAP_POWER_LEN;
break;
case TB_PORT_CAP_ADAP:
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
length = PORT_CAP_PCIE_LEN;
} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
length = PORT_CAP_DP_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;
} else {
seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
cap, header.basic.cap);
return;
}
break;
case TB_PORT_CAP_VSE:
if (!header.extended_short.length) {
ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
cap + 1, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n",
cap + 1);
return;
}
length = header.extended_long.length;
vsec_id = header.extended_short.vsec_id;
} else {
length = header.extended_short.length;
vsec_id = header.extended_short.vsec_id;
/*
* Ice Lake and Tiger Lake do not implement the
* full length of the capability, only first 32
* dwords so hard-code it here.
*/
if (!vsec_id &&
(tb_switch_is_ice_lake(port->sw) ||
tb_switch_is_tiger_lake(port->sw)))
length = 32;
}
break;
case TB_PORT_CAP_USB4:
length = PORT_CAP_USB4_LEN;
break;
default:
seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
cap, header.basic.cap);
return;
}
cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
}
static void port_caps_show(struct tb_port *port, struct seq_file *s)
{
int cap;
cap = tb_port_next_cap(port, 0);
while (cap > 0) {
port_cap_show(port, s, cap);
cap = tb_port_next_cap(port, cap);
}
}
static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
{
u32 data[PORT_CAP_BASIC_LEN];
int ret, i;
ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(data); i++)
seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
return 0;
}
static int port_regs_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = sw->tb;
int ret;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
ret = port_basic_regs_show(port, s);
if (ret)
goto out_unlock;
port_caps_show(port, s);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RW(port_regs);
static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
unsigned int cap)
{
struct tb_cap_any header;
int ret, length;
u8 vsec_id = 0;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n", cap);
return;
}
if (header.basic.cap == TB_SWITCH_CAP_VSE) {
if (!header.extended_short.length) {
ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
cap + 1, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n",
cap + 1);
return;
}
length = header.extended_long.length;
} else {
length = header.extended_short.length;
}
vsec_id = header.extended_short.vsec_id;
} else {
if (header.basic.cap == TB_SWITCH_CAP_TMU) {
length = SWITCH_CAP_TMU_LEN;
} else {
seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
cap, header.basic.cap);
return;
}
}
cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
}
static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
{
int cap;
cap = tb_switch_next_cap(sw, 0);
while (cap > 0) {
switch_cap_show(sw, s, cap);
cap = tb_switch_next_cap(sw, cap);
}
}
static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
{
u32 data[SWITCH_CAP_BASIC_LEN];
size_t dwords;
int ret, i;
/* Only USB4 has the additional registers */
if (tb_switch_is_usb4(sw))
dwords = ARRAY_SIZE(data);
else
dwords = 7;
ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
if (ret)
return ret;
for (i = 0; i < dwords; i++)
seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
return 0;
}
static int switch_regs_show(struct seq_file *s, void *not_used)
{
struct tb_switch *sw = s->private;
struct tb *tb = sw->tb;
int ret;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
ret = switch_basic_regs_show(sw, s);
if (ret)
goto out_unlock;
switch_caps_show(sw, s);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RW(switch_regs);
static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
{
u32 data[PATH_LEN];
int ret, i;
ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
ARRAY_SIZE(data));
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
return ret;
}
for (i = 0; i < ARRAY_SIZE(data); i++) {
seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
hopid * PATH_LEN + i, i, hopid, data[i]);
}
return 0;
}
static int path_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = sw->tb;
int start, i, ret = 0;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
seq_puts(s, "# offset relative_offset in_hop_id value\n");
/* NHI and lane adapters have entry for path 0 */
if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
ret = path_show_one(port, s, 0);
if (ret)
goto out_unlock;
}
start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;
for (i = start; i <= port->config.max_in_hop_id; i++) {
ret = path_show_one(port, s, i);
if (ret)
break;
}
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RO(path);
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
int counter)
{
u32 data[COUNTER_SET_LEN];
int ret, i;
ret = tb_port_read(port, data, TB_CFG_COUNTERS,
counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n",
counter * COUNTER_SET_LEN);
return ret;
}
for (i = 0; i < ARRAY_SIZE(data); i++) {
seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
counter * COUNTER_SET_LEN + i, i, counter, data[i]);
}
return 0;
}
static int counters_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = sw->tb;
int i, ret = 0;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
}
seq_puts(s, "# offset relative_offset counter_id value\n");
for (i = 0; i < port->config.max_counters; i++) {
ret = counter_set_regs_show(port, s, i);
if (ret)
break;
}
mutex_unlock(&tb->lock);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RW(counters);
/**
* tb_switch_debugfs_init() - Add debugfs entries for router
* @sw: Pointer to the router
*
* Adds debugfs directories and files for given router.
*/
void tb_switch_debugfs_init(struct tb_switch *sw)
{
struct dentry *debugfs_dir;
struct tb_port *port;
debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
sw->debugfs_dir = debugfs_dir;
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
&switch_regs_fops);
tb_switch_for_each_port(sw, port) {
struct dentry *debugfs_dir;
char dir_name[10];
if (port->disabled)
continue;
if (port->config.type == TB_TYPE_INACTIVE)
continue;
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
port, &port_regs_fops);
debugfs_create_file("path", 0400, debugfs_dir, port,
&path_fops);
if (port->config.counters_support)
debugfs_create_file("counters", 0600, debugfs_dir, port,
&counters_fops);
}
}
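/*
 * The resulting hierarchy under /sys/kernel/debug/thunderbolt looks,
 * for a router named "0-1" (example name), like:
 *
 *   0-1/regs
 *   0-1/port1/regs
 *   0-1/port1/path
 *   0-1/port1/counters   (only when the port supports counters)
 */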
/**
* tb_switch_debugfs_remove() - Remove all router debugfs entries
* @sw: Pointer to the router
*
* Removes all previously added debugfs entries under this router.
*/
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
debugfs_remove_recursive(sw->debugfs_dir);
}
void tb_debugfs_init(void)
{
tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
}
void tb_debugfs_exit(void)
{
debugfs_remove_recursive(tb_debugfs_root);
}

drivers/thunderbolt/domain.c

@@ -275,7 +275,7 @@ static struct attribute *domain_attrs[] = {
 static umode_t domain_attr_is_visible(struct kobject *kobj,
 				      struct attribute *attr, int n)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct tb *tb = container_of(dev, struct tb, dev);
 
 	if (attr == &dev_attr_boot_acl.attr) {
@@ -455,6 +455,8 @@ int tb_domain_add(struct tb *tb)
 	/* This starts event processing */
 	mutex_unlock(&tb->lock);
 
+	device_init_wakeup(&tb->dev, true);
+
 	pm_runtime_no_callbacks(&tb->dev);
 	pm_runtime_set_active(&tb->dev);
 	pm_runtime_enable(&tb->dev);
@@ -544,6 +546,33 @@ int tb_domain_suspend(struct tb *tb)
 	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
 }
 
+int tb_domain_freeze_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->freeze_noirq)
+		ret = tb->cm_ops->freeze_noirq(tb);
+	if (!ret)
+		tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+int tb_domain_thaw_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	tb_ctl_start(tb->ctl);
+	if (tb->cm_ops->thaw_noirq)
+		ret = tb->cm_ops->thaw_noirq(tb);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
 void tb_domain_complete(struct tb *tb)
 {
 	if (tb->cm_ops->complete)
@@ -798,12 +827,23 @@ int tb_domain_init(void)
 {
 	int ret;
 
+	tb_test_init();
+	tb_debugfs_init();
 	ret = tb_xdomain_init();
 	if (ret)
-		return ret;
+		goto err_debugfs;
 	ret = bus_register(&tb_bus_type);
 	if (ret)
-		tb_xdomain_exit();
+		goto err_xdomain;
+
+	return 0;
+
+err_xdomain:
+	tb_xdomain_exit();
+err_debugfs:
+	tb_debugfs_exit();
+	tb_test_exit();
 
 	return ret;
 }
@@ -814,4 +854,6 @@ void tb_domain_exit(void)
 	ida_destroy(&tb_domain_ida);
 	tb_nvm_exit();
 	tb_xdomain_exit();
+	tb_debugfs_exit();
+	tb_test_exit();
 }

drivers/thunderbolt/icm.c

@@ -1635,11 +1635,14 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
 
 static bool icm_tgl_is_supported(struct tb *tb)
 {
+	u32 val;
+
 	/*
 	 * If the firmware is not running use software CM. This platform
 	 * should fully support both.
 	 */
-	return icm_firmware_running(tb->nhi);
+	val = ioread32(tb->nhi->iobase + REG_FW_STS);
+	return !!(val & REG_FW_STS_NVM_AUTH_DONE);
 }
 
 static void icm_handle_notification(struct work_struct *work)

drivers/thunderbolt/lc.c

@@ -45,7 +45,7 @@ static int find_port_lc_cap(struct tb_port *port)
 	return sw->cap_lc + start + phys * size;
 }
 
-static int tb_lc_configure_lane(struct tb_port *port, bool configure)
+static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
 {
 	bool upstream = tb_is_upstream_port(port);
 	struct tb_switch *sw = port->sw;
@@ -69,7 +69,7 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure)
 	else
 		lane = TB_LC_SX_CTRL_L2C;
 
-	if (configure) {
+	if (configured) {
 		ctrl |= lane;
 		if (upstream)
 			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
@@ -83,55 +83,146 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure)
 }
 
 /**
- * tb_lc_configure_link() - Let LC know about configured link
- * @sw: Switch that is being added
+ * tb_lc_configure_port() - Let LC know about configured port
+ * @port: Port that is set as configured
  *
- * Informs LC of both parent switch and @sw that there is established
- * link between the two.
+ * Sets the port configured for power management purposes.
  */
-int tb_lc_configure_link(struct tb_switch *sw)
+int tb_lc_configure_port(struct tb_port *port)
 {
-	struct tb_port *up, *down;
-	int ret;
-
-	if (!tb_route(sw) || tb_switch_is_icm(sw))
-		return 0;
-
-	up = tb_upstream_port(sw);
-	down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
-
-	/* Configure parent link toward this switch */
-	ret = tb_lc_configure_lane(down, true);
-	if (ret)
-		return ret;
-
-	/* Configure upstream link from this switch to the parent */
-	ret = tb_lc_configure_lane(up, true);
-	if (ret)
-		tb_lc_configure_lane(down, false);
-
-	return ret;
+	return tb_lc_set_port_configured(port, true);
 }
 
 /**
- * tb_lc_unconfigure_link() - Let LC know about unconfigured link
- * @sw: Switch to unconfigure
+ * tb_lc_unconfigure_port() - Let LC know about unconfigured port
+ * @port: Port that is set as unconfigured
  *
- * Informs LC of both parent switch and @sw that the link between the
- * two does not exist anymore.
+ * Sets the port unconfigured for power management purposes.
  */
-void tb_lc_unconfigure_link(struct tb_switch *sw)
+void tb_lc_unconfigure_port(struct tb_port *port)
 {
-	struct tb_port *up, *down;
-
-	if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw))
-		return;
-
-	up = tb_upstream_port(sw);
-	down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
-
-	tb_lc_configure_lane(up, false);
-	tb_lc_configure_lane(down, false);
+	tb_lc_set_port_configured(port, false);
+}
+
+static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
+{
+	struct tb_switch *sw = port->sw;
+	u32 ctrl, lane;
+	int cap, ret;
+
+	if (sw->generation < 2)
+		return 0;
+
+	cap = find_port_lc_cap(port);
+	if (cap < 0)
+		return cap;
+
+	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+	if (ret)
+		return ret;
+
+	/* Resolve correct lane */
+	if (port->port % 2)
+		lane = TB_LC_SX_CTRL_L1D;
+	else
+		lane = TB_LC_SX_CTRL_L2D;
+
+	if (configure)
+		ctrl |= lane;
+	else
+		ctrl &= ~lane;
+
+	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
+ * @port: Switch downstream port connected to another host
+ *
+ * Sets the lane configured for XDomain accordingly so that the LC knows
+ * about this. Returns %0 in success and negative errno in failure.
+ */
+int tb_lc_configure_xdomain(struct tb_port *port)
+{
+	return tb_lc_set_xdomain_configured(port, true);
+}
+
+/**
+ * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
+ * @port: Switch downstream port that was connected to another host
+ *
+ * Unsets the lane XDomain configuration.
+ */
+void tb_lc_unconfigure_xdomain(struct tb_port *port)
+{
+	tb_lc_set_xdomain_configured(port, false);
+}
+
+static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
+			      unsigned int flags)
+{
+	u32 ctrl;
+	int ret;
+
+	/*
+	 * Enable wake on PCIe and USB4 (wake coming from another
+	 * router).
+	 */
+	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
+			 offset + TB_LC_SX_CTRL, 1);
+	if (ret)
+		return ret;
+
+	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
+		  TB_LC_SX_CTRL_WOU4);
+
+	if (flags & TB_WAKE_ON_CONNECT)
+		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
+	if (flags & TB_WAKE_ON_USB4)
+		ctrl |= TB_LC_SX_CTRL_WOU4;
+	if (flags & TB_WAKE_ON_PCIE)
+		ctrl |= TB_LC_SX_CTRL_WOP;
+
+	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_set_wake() - Enable/disable wake
+ * @sw: Switch whose wakes to configure
+ * @flags: Wakeup flags (%0 to disable)
+ *
+ * For each LC sets wake bits accordingly.
+ */
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+	int start, size, nlc, ret, i;
+	u32 desc;
+
+	if (sw->generation < 2)
+		return 0;
+
+	if (!tb_route(sw))
+		return 0;
+
+	ret = read_lc_desc(sw, &desc);
+	if (ret)
+		return ret;
+
+	/* Figure out number of link controllers */
+	nlc = desc & TB_LC_DESC_NLC_MASK;
+	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+
+	/* For each link controller set the wake bits */
+	for (i = 0; i < nlc; i++) {
+		unsigned int offset = sw->cap_lc + start + i * size;
+
+		ret = tb_lc_set_wake_one(sw, offset, flags);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 /**

drivers/thunderbolt/nhi.c

@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
 
 #include "nhi.h"
 #include "nhi_regs.h"
@@ -863,6 +864,22 @@ static int nhi_suspend_noirq(struct device *dev)
 	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
 }
 
+static int nhi_freeze_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_freeze_noirq(tb);
+}
+
+static int nhi_thaw_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_thaw_noirq(tb);
+}
+
 static bool nhi_wake_supported(struct pci_dev *pdev)
 {
 	u8 val;
@@ -1069,6 +1086,69 @@ static bool nhi_imr_valid(struct pci_dev *pdev)
 	return true;
 }
 
+/*
+ * During suspend the Thunderbolt controller is reset and all PCIe
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. This adds device links between the tunneled PCIe
+ * downstream ports and the NHI so that the device core will make sure
+ * NHI is resumed first before the rest.
+ */
+static void tb_apple_add_links(struct tb_nhi *nhi)
+{
+	struct pci_dev *upstream, *pdev;
+
+	if (!x86_apple_machine)
+		return;
+
+	switch (nhi->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		break;
+	default:
+		return;
+	}
+
+	upstream = pci_upstream_bridge(nhi->pdev);
+	while (upstream) {
+		if (!pci_is_pcie(upstream))
+			return;
+		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
+			break;
+		upstream = pci_upstream_bridge(upstream);
+	}
+
+	if (!upstream)
+		return;
+
+	/*
+	 * For each hotplug downstream port, add a device link back to
+	 * NHI so that PCIe tunnels can be re-established after sleep.
+	 */
+	for_each_pci_bridge(pdev, upstream->subordinate) {
+		const struct device_link *link;
+
+		if (!pci_is_pcie(pdev))
+			continue;
+		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
+		    !pdev->is_hotplug_bridge)
+			continue;
+
+		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+				       DL_FLAG_AUTOREMOVE_SUPPLIER |
+				       DL_FLAG_PM_RUNTIME);
+		if (link) {
+			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+				dev_name(&pdev->dev));
+		} else {
+			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+				 dev_name(&pdev->dev));
+		}
+	}
+}
+
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct tb_nhi *nhi;
@@ -1134,6 +1214,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return res;
 	}
 
+	tb_apple_add_links(nhi);
+	tb_acpi_add_links(nhi);
+
 	tb = icm_probe(nhi);
 	if (!tb)
 		tb = tb_probe(nhi);
@@ -1157,6 +1240,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	pci_set_drvdata(pdev, tb);
+	device_wakeup_enable(&pdev->dev);
+
 	pm_runtime_allow(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
 	pm_runtime_use_autosuspend(&pdev->dev);
@@ -1186,14 +1271,13 @@ static void nhi_remove(struct pci_dev *pdev)
 static const struct dev_pm_ops nhi_pm_ops = {
 	.suspend_noirq = nhi_suspend_noirq,
 	.resume_noirq = nhi_resume_noirq,
-	.freeze_noirq = nhi_suspend_noirq, /*
+	.freeze_noirq = nhi_freeze_noirq, /*
 					    * we just disable hotplug, the
 					    * pci-tunnels stay alive.
 					    */
-	.thaw_noirq = nhi_resume_noirq,
+	.thaw_noirq = nhi_thaw_noirq,
 	.restore_noirq = nhi_resume_noirq,
 	.suspend = nhi_suspend,
-	.freeze = nhi_suspend,
 	.poweroff_noirq = nhi_poweroff_noirq,
 	.poweroff = nhi_suspend,
 	.complete = nhi_complete,

drivers/thunderbolt/nhi_ops.c

@@ -59,7 +59,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
 	pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
 
 	if (power) {
-		unsigned int retries = 10;
+		unsigned int retries = 350;
 		u32 val;
 
 		/* Wait until the firmware tells it is up and running */
@@ -67,7 +67,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
 			pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
 			if (val & VS_CAP_9_FW_READY)
 				return 0;
-			msleep(250);
+			usleep_range(3000, 3100);
 		} while (--retries);
 
 		return -ETIMEDOUT;
@@ -97,7 +97,7 @@ static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
 		pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
 		if (data & VS_CAP_18_DONE)
 			goto clear;
-		msleep(100);
+		usleep_range(1000, 1100);
 	} while (time_before(jiffies, end));
 
 	return -ETIMEDOUT;
@@ -121,31 +121,38 @@ static void icl_nhi_set_ltr(struct tb_nhi *nhi)
 static int icl_nhi_suspend(struct tb_nhi *nhi)
 {
+	struct tb *tb = pci_get_drvdata(nhi->pdev);
 	int ret;
 
 	if (icl_nhi_is_device_connected(nhi))
 		return 0;
 
-	/*
-	 * If there is no device connected we need to perform both: a
-	 * handshake through LC mailbox and force power down before
-	 * entering D3.
-	 */
-	icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
-	ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
-	if (ret)
-		return ret;
+	if (tb_switch_is_icm(tb->root_switch)) {
+		/*
+		 * If there is no device connected we need to perform
+		 * both: a handshake through LC mailbox and force power
+		 * down before entering D3.
+		 */
+		icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
+		ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+		if (ret)
+			return ret;
+	}
 
 	return icl_nhi_force_power(nhi, false);
 }
 
 static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
 {
+	struct tb *tb = pci_get_drvdata(nhi->pdev);
 	enum icl_lc_mailbox_cmd cmd;
 
 	if (!pm_suspend_via_firmware())
 		return icl_nhi_suspend(nhi);
 
+	if (!tb_switch_is_icm(tb->root_switch))
+		return 0;
+
 	cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
 	icl_nhi_lc_mailbox_cmd(nhi, cmd);
 	return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);

drivers/thunderbolt/quirks.c

@@ -27,7 +27,7 @@ static const struct tb_quirk tb_quirks[] = {
  * tb_check_quirks() - Check for quirks to apply
  * @sw: Thunderbolt switch
  *
- * Apply any quirks for the Thunderbolt controller
+ * Apply any quirks for the Thunderbolt controller.
  */
 void tb_check_quirks(struct tb_switch *sw)
 {

drivers/thunderbolt/switch.c

@ -601,6 +601,13 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
if (credits == 0 || port->sw->is_unplugged) if (credits == 0 || port->sw->is_unplugged)
return 0; return 0;
/*
* USB4 restricts programming NFC buffers to lane adapters only
* so skip other ports.
*/
if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
return 0;
nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
nfc_credits += credits; nfc_credits += credits;
@ -666,6 +673,50 @@ int tb_port_unlock(struct tb_port *port)
return 0; return 0;
} }
static int __tb_port_enable(struct tb_port *port, bool enable)
{
int ret;
u32 phy;
if (!tb_port_is_null(port))
return -EINVAL;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy &= ~LANE_ADP_CS_1_LD;
else
phy |= LANE_ADP_CS_1_LD;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
* tb_port_enable() - Enable lane adapter
* @port: Port to enable (can be %NULL)
*
* This is used for lane 0 and 1 adapters to enable it.
*/
int tb_port_enable(struct tb_port *port)
{
return __tb_port_enable(port, true);
}
/**
* tb_port_disable() - Disable lane adapter
* @port: Port to disable (can be %NULL)
*
* This is used for lane 0 and 1 adapters to disable it.
*/
int tb_port_disable(struct tb_port *port)
{
return __tb_port_enable(port, false);
}
/** /**
* tb_init_port() - initialize a port * tb_init_port() - initialize a port
* *
@ -739,7 +790,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
* NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
* reserved. * reserved.
*/ */
if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID) if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
min_hopid = TB_PATH_MIN_HOPID; min_hopid = TB_PATH_MIN_HOPID;
if (max_hopid < 0 || max_hopid > port_max_hopid) if (max_hopid < 0 || max_hopid > port_max_hopid)
@ -1227,23 +1278,24 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
/** /**
* reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET * reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET
* @sw: Switch to reset
* *
* Return: Returns 0 on success or an error code on failure. * Return: Returns 0 on success or an error code on failure.
*/ */
int tb_switch_reset(struct tb *tb, u64 route) int tb_switch_reset(struct tb_switch *sw)
{ {
struct tb_cfg_result res; struct tb_cfg_result res;
struct tb_regs_switch_header header = {
header.route_hi = route >> 32, if (sw->generation > 1)
header.route_lo = route, return 0;
header.enabled = true,
}; tb_sw_dbg(sw, "resetting switch\n");
tb_dbg(tb, "resetting switch at %llx\n", route);
res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
0, 2, 2, 2); TB_CFG_SWITCH, 2, 2);
if (res.err) if (res.err)
return res.err; return res.err;
res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
if (res.err > 0) if (res.err > 0)
return -EIO; return -EIO;
return res.err; return res.err;
@ -1261,7 +1313,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data; u32 data;
int res; int res;
if (tb_switch_is_icm(sw)) if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
return 0; return 0;
sw->config.plug_events_delay = 0xff; sw->config.plug_events_delay = 0xff;
@ -1269,10 +1321,6 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
if (res) if (res)
return res; return res;
/* Plug events are always enabled in USB4 */
if (tb_switch_is_usb4(sw))
return 0;
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res) if (res)
return res; return res;
@ -1649,7 +1697,7 @@ static struct attribute *switch_attrs[] = {
static umode_t switch_attr_is_visible(struct kobject *kobj, static umode_t switch_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n) struct attribute *attr, int n)
{ {
struct device *dev = container_of(kobj, struct device, kobj); struct device *dev = kobj_to_dev(kobj);
struct tb_switch *sw = tb_to_switch(dev); struct tb_switch *sw = tb_to_switch(dev);
if (attr == &dev_attr_device.attr) { if (attr == &dev_attr_device.attr) {
@ -1988,7 +2036,7 @@ int tb_switch_configure(struct tb_switch *sw)
route = tb_route(sw); route = tb_route(sw);
tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
sw->config.enabled ? "restoring " : "initializing", route, sw->config.enabled ? "restoring" : "initializing", route,
tb_route_length(route), sw->config.upstream_port_number); tb_route_length(route), sw->config.upstream_port_number);
sw->config.enabled = 1; sw->config.enabled = 1;
@ -2008,10 +2056,6 @@ int tb_switch_configure(struct tb_switch *sw)
return ret; return ret;
ret = usb4_switch_setup(sw); ret = usb4_switch_setup(sw);
if (ret)
return ret;
ret = usb4_switch_configure_link(sw);
} else { } else {
if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
tb_sw_warn(sw, "unknown switch vendor id %#x\n", tb_sw_warn(sw, "unknown switch vendor id %#x\n",
@ -2025,10 +2069,6 @@ int tb_switch_configure(struct tb_switch *sw)
/* Enumerate the switch */ /* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
ROUTER_CS_1, 3); ROUTER_CS_1, 3);
if (ret)
return ret;
ret = tb_lc_configure_link(sw);
} }
if (ret) if (ret)
return ret; return ret;
@ -2311,6 +2351,69 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
tb_sw_dbg(sw, "lane bonding disabled\n"); tb_sw_dbg(sw, "lane bonding disabled\n");
} }
/**
* tb_switch_configure_link() - Set link configured
* @sw: Switch whose link is configured
*
* Sets the link upstream from @sw configured (from both ends) so that
* it will not be disconnected when the domain exits sleep. Can be
* called for any switch.
*
* It is recommended that this is called after lane bonding is enabled.
*
* Returns %0 on success and negative errno in case of error.
*/
int tb_switch_configure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return 0;
up = tb_upstream_port(sw);
if (tb_switch_is_usb4(up->sw))
ret = usb4_port_configure(up);
else
ret = tb_lc_configure_port(up);
if (ret)
return ret;
down = up->remote;
if (tb_switch_is_usb4(down->sw))
return usb4_port_configure(down);
return tb_lc_configure_port(down);
}
/**
* tb_switch_unconfigure_link() - Unconfigure link
* @sw: Switch whose link is unconfigured
*
* Sets the link unconfigured so the @sw will be disconnected if the
* domain exists sleep.
*/
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
if (sw->is_unplugged)
return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
up = tb_upstream_port(sw);
if (tb_switch_is_usb4(up->sw))
usb4_port_unconfigure(up);
else
tb_lc_unconfigure_port(up);
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
}

 /**
  * tb_switch_add() - Add a switch to the domain
  * @sw: Switch to add
@@ -2399,6 +2502,13 @@ int tb_switch_add(struct tb_switch *sw)
 		return ret;
 	}

+	/*
+	 * Thunderbolt routers do not generate wakeups themselves but
+	 * they forward wakeups from tunneled protocols, so enable it
+	 * here.
+	 */
+	device_init_wakeup(&sw->dev, true);
+
 	pm_runtime_set_active(&sw->dev);
 	if (sw->rpm) {
 		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
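
Since device_init_wakeup() both marks the router wakeup-capable and enables wakeup by default, the device_may_wakeup() check in tb_switch_suspend() below ends up honoring whatever the user last wrote to the router's power/wakeup attribute in sysfs; no extra plumbing is needed in the connection manager.
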
@@ -2408,6 +2518,7 @@ int tb_switch_add(struct tb_switch *sw)
 		pm_request_autosuspend(&sw->dev);
 	}

+	tb_switch_debugfs_init(sw);
 	return 0;
 }

@@ -2423,6 +2534,8 @@ void tb_switch_remove(struct tb_switch *sw)
 {
 	struct tb_port *port;

+	tb_switch_debugfs_remove(sw);
+
 	if (sw->rpm) {
 		pm_runtime_get_sync(&sw->dev);
 		pm_runtime_disable(&sw->dev);
@@ -2445,11 +2558,6 @@ void tb_switch_remove(struct tb_switch *sw)
 	if (!sw->is_unplugged)
 		tb_plug_events_active(sw, false);

-	if (tb_switch_is_usb4(sw))
-		usb4_switch_unconfigure_link(sw);
-	else
-		tb_lc_unconfigure_link(sw);
-
 	tb_switch_nvm_remove(sw);

 	if (tb_route(sw))
@@ -2481,6 +2589,18 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
 	}
 }

+static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+	if (flags)
+		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
+	else
+		tb_sw_dbg(sw, "disabling wakeup\n");
+
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_set_wake(sw, flags);
+	return tb_lc_set_wake(sw, flags);
+}
+
 int tb_switch_resume(struct tb_switch *sw)
 {
 	struct tb_port *port;
@@ -2526,6 +2646,13 @@ int tb_switch_resume(struct tb_switch *sw)
 		if (err)
 			return err;

+	/* Disable wakes */
+	tb_switch_set_wake(sw, 0);
+
+	err = tb_switch_tmu_init(sw);
+	if (err)
+		return err;
+
 	/* check for surviving downstream switches */
 	tb_switch_for_each_port(sw, port) {
 		if (!tb_port_has_remote(port) && !port->xdomain)
@@ -2555,20 +2682,43 @@ int tb_switch_resume(struct tb_switch *sw)
 	return 0;
 }

-void tb_switch_suspend(struct tb_switch *sw)
+/**
+ * tb_switch_suspend() - Put a switch to sleep
+ * @sw: Switch to suspend
+ * @runtime: Is this runtime suspend or system sleep
+ *
+ * Suspends router and all its children. Enables wakes according to
+ * value of @runtime and then sets sleep bit for the router. If @sw is
+ * the host router, the domain is ready to go to sleep once this
+ * function returns.
+ */
+void tb_switch_suspend(struct tb_switch *sw, bool runtime)
 {
+	unsigned int flags = 0;
 	struct tb_port *port;
 	int err;

+	tb_sw_dbg(sw, "suspending switch\n");
+
 	err = tb_plug_events_active(sw, false);
 	if (err)
 		return;

 	tb_switch_for_each_port(sw, port) {
 		if (tb_port_has_remote(port))
-			tb_switch_suspend(port->remote->sw);
+			tb_switch_suspend(port->remote->sw, runtime);
 	}

+	if (runtime) {
+		/* Trigger wake when something is plugged in/out */
+		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
+		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+	} else if (device_may_wakeup(&sw->dev)) {
+		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+	}
+
+	tb_switch_set_wake(sw, flags);
+
 	if (tb_switch_is_usb4(sw))
 		usb4_switch_set_sleep(sw);
 	else
 		tb_lc_set_sleep(sw);
 }

drivers/thunderbolt/tb.c

@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>

 #include "tb.h"
 #include "tb_regs.h"
@@ -22,13 +23,21 @@
  *		    events and exit if this is not set (it needs to
  *		    acquire the lock one more time). Used to drain wq
  *		    after cfg has been paused.
+ * @remove_work: Work used to remove any unplugged routers after
+ *		 runtime resume
  */
 struct tb_cm {
 	struct list_head tunnel_list;
 	struct list_head dp_resources;
 	bool hotplug_active;
+	struct delayed_work remove_work;
 };

+static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
+{
+	return ((void *)tcm - sizeof(struct tb));
+}
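
The subtraction works because the connection-manager private area is assumed to be allocated immediately after struct tb in one block (that is what tb_priv() hands out), so the two pointers differ by exactly sizeof(struct tb). A toy model of that assumed layout, using hypothetical toy_* names:

struct toy_tb { int index; };			/* stands in for struct tb */
struct toy_tcm { int hotplug_active; };		/* stands in for struct tb_cm */

/* tb_priv()-like accessor: private data lives right after the domain. */
static inline void *toy_priv(struct toy_tb *tb)
{
	return (char *)tb + sizeof(*tb);
}

/* Inverse of the above, mirroring tcm_to_tb(). */
static inline struct toy_tb *toy_tcm_to_tb(struct toy_tcm *tcm)
{
	return (struct toy_tb *)((char *)tcm - sizeof(struct toy_tb));
}
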

 struct tb_hotplug_event {
 	struct work_struct work;
 	struct tb *tb;
@@ -140,6 +149,29 @@ static void tb_discover_tunnels(struct tb_switch *sw)
 	}
 }

+static int tb_port_configure_xdomain(struct tb_port *port)
+{
+	/*
+	 * XDomain paths currently only support single lane so we must
+	 * disable the other lane according to USB4 spec.
+	 */
+	tb_port_disable(port->dual_link_port);
+
+	if (tb_switch_is_usb4(port->sw))
+		return usb4_port_configure_xdomain(port);
+	return tb_lc_configure_xdomain(port);
+}
+
+static void tb_port_unconfigure_xdomain(struct tb_port *port)
+{
+	if (tb_switch_is_usb4(port->sw))
+		usb4_port_unconfigure_xdomain(port);
+	else
+		tb_lc_unconfigure_xdomain(port);
+
+	tb_port_enable(port->dual_link_port);
+}
+
 static void tb_scan_xdomain(struct tb_port *port)
 {
 	struct tb_switch *sw = port->sw;
@@ -158,6 +190,7 @@ static void tb_scan_xdomain(struct tb_port *port)
 			      NULL);
 	if (xd) {
 		tb_port_at(route, sw)->xdomain = xd;
+		tb_port_configure_xdomain(port);
 		tb_xdomain_add(xd);
 	}
 }
@@ -502,8 +535,13 @@ static void tb_scan_switch(struct tb_switch *sw)
 {
 	struct tb_port *port;

+	pm_runtime_get_sync(&sw->dev);
+
 	tb_switch_for_each_port(sw, port)
 		tb_scan_port(port);
+
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
 }

 /**
@@ -566,6 +604,7 @@ static void tb_scan_port(struct tb_port *port)
 	 */
 	if (port->xdomain) {
 		tb_xdomain_remove(port->xdomain);
+		tb_port_unconfigure_xdomain(port);
 		port->xdomain = NULL;
 	}

@@ -577,6 +616,12 @@ static void tb_scan_port(struct tb_port *port)
 	if (!tcm->hotplug_active)
 		dev_set_uevent_suppress(&sw->dev, true);

+	/*
+	 * At the moment Thunderbolt 2 and beyond (devices with LC) we
+	 * can support runtime PM.
+	 */
+	sw->rpm = sw->generation > 1;
+
 	if (tb_switch_add(sw)) {
 		tb_switch_put(sw);
 		return;
@@ -592,8 +637,9 @@ static void tb_scan_port(struct tb_port *port)
 	}

 	/* Enable lane bonding if supported */
-	if (tb_switch_lane_bonding_enable(sw))
-		tb_sw_warn(sw, "failed to enable lane bonding\n");
+	tb_switch_lane_bonding_enable(sw);
+	/* Set the link configured */
+	tb_switch_configure_link(sw);

 	if (tb_enable_tmu(sw))
 		tb_sw_warn(sw, "failed to enable TMU\n");
@@ -636,6 +682,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
 		 * deallocated properly.
 		 */
 		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
+		/* Now we can allow the domain to runtime suspend again */
+		pm_runtime_mark_last_busy(&dst_port->sw->dev);
+		pm_runtime_put_autosuspend(&dst_port->sw->dev);
+		pm_runtime_mark_last_busy(&src_port->sw->dev);
+		pm_runtime_put_autosuspend(&src_port->sw->dev);
 		fallthrough;

 	case TB_TUNNEL_USB3:
@@ -682,6 +733,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
 		if (port->remote->sw->is_unplugged) {
 			tb_retimer_remove_all(port);
 			tb_remove_dp_resources(port->remote->sw);
+			tb_switch_unconfigure_link(port->remote->sw);
 			tb_switch_lane_bonding_disable(port->remote->sw);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
@@ -821,9 +873,20 @@ static void tb_tunnel_dp(struct tb *tb)
 		return;
 	}

+	/*
+	 * DP stream needs the domain to be active so runtime resume
+	 * both ends of the tunnel.
+	 *
+	 * This should bring the routers in the middle active as well
+	 * and keeps the domain from runtime suspending while the DP
+	 * tunnel is active.
+	 */
+	pm_runtime_get_sync(&in->sw->dev);
+	pm_runtime_get_sync(&out->sw->dev);
+
 	if (tb_switch_alloc_dp_resource(in->sw, in)) {
 		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
-		return;
+		goto err_rpm_put;
 	}

 	/* Make all unused USB3 bandwidth available for the new DP tunnel */
@@ -862,6 +925,11 @@ err_reclaim:
 	tb_reclaim_usb3_bandwidth(tb, in, out);
 err_dealloc_dp:
 	tb_switch_dealloc_dp_resource(in->sw, in);
+err_rpm_put:
+	pm_runtime_mark_last_busy(&out->sw->dev);
+	pm_runtime_put_autosuspend(&out->sw->dev);
+	pm_runtime_mark_last_busy(&in->sw->dev);
+	pm_runtime_put_autosuspend(&in->sw->dev);
 }

 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -911,6 +979,29 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
 	tb_tunnel_dp(tb);
 }

+static void tb_disconnect_and_release_dp(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel, *n;
+
+	/*
+	 * Tear down all DP tunnels and release their resources. They
+	 * will be re-established after resume based on plug events.
+	 */
+	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
+		if (tb_tunnel_is_dp(tunnel))
+			tb_deactivate_and_free_tunnel(tunnel);
+	}
+
+	while (!list_empty(&tcm->dp_resources)) {
+		struct tb_port *port;
+
+		port = list_first_entry(&tcm->dp_resources,
+					struct tb_port, list);
+		list_del_init(&port->list);
+	}
+}
+
 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 {
 	struct tb_port *up, *down, *port;
@@ -1022,6 +1113,10 @@ static void tb_handle_hotplug(struct work_struct *work)
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_switch *sw;
 	struct tb_port *port;

+	/* Bring the domain back from sleep if it was suspended */
+	pm_runtime_get_sync(&tb->dev);
+
 	mutex_lock(&tb->lock);
 	if (!tcm->hotplug_active)
 		goto out; /* during init, suspend or shutdown */
@@ -1045,6 +1140,9 @@ static void tb_handle_hotplug(struct work_struct *work)
 			ev->route, ev->port, ev->unplug);
 		goto put_sw;
 	}

+	pm_runtime_get_sync(&sw->dev);
+
 	if (ev->unplug) {
 		tb_retimer_remove_all(port);
@@ -1054,6 +1152,7 @@ static void tb_handle_hotplug(struct work_struct *work)
 			tb_free_invalid_tunnels(tb);
 			tb_remove_dp_resources(port->remote->sw);
 			tb_switch_tmu_disable(port->remote->sw);
+			tb_switch_unconfigure_link(port->remote->sw);
 			tb_switch_lane_bonding_disable(port->remote->sw);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
@@ -1077,6 +1176,7 @@ static void tb_handle_hotplug(struct work_struct *work)
 			port->xdomain = NULL;
 			__tb_disconnect_xdomain_paths(tb, xd);
 			tb_xdomain_put(xd);
+			tb_port_unconfigure_xdomain(port);
 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
 			tb_dp_resource_unavailable(tb, port);
 		} else {
@@ -1096,10 +1196,17 @@ static void tb_handle_hotplug(struct work_struct *work)
 		}
 	}

+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+
 put_sw:
 	tb_switch_put(sw);
 out:
 	mutex_unlock(&tb->lock);
+
+	pm_runtime_mark_last_busy(&tb->dev);
+	pm_runtime_put_autosuspend(&tb->dev);
+
 	kfree(ev);
 }
@@ -1135,6 +1242,7 @@ static void tb_stop(struct tb *tb)
 	struct tb_tunnel *tunnel;
 	struct tb_tunnel *n;

+	cancel_delayed_work(&tcm->remove_work);
 	/* tunnels are only present after everything has been initialized */
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
 		/*
@@ -1186,6 +1294,8 @@ static int tb_start(struct tb *tb)
 	 * root switch.
 	 */
 	tb->root_switch->no_nvm_upgrade = true;
+	/* All USB4 routers support runtime PM */
+	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

 	ret = tb_switch_configure(tb->root_switch);
 	if (ret) {
@@ -1227,7 +1337,8 @@ static int tb_suspend_noirq(struct tb *tb)
 	struct tb_cm *tcm = tb_priv(tb);

 	tb_dbg(tb, "suspending...\n");
-	tb_switch_suspend(tb->root_switch);
+	tb_disconnect_and_release_dp(tb);
+	tb_switch_suspend(tb->root_switch, false);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 	tb_dbg(tb, "suspend finished\n");

@@ -1238,17 +1349,25 @@ static void tb_restore_children(struct tb_switch *sw)
 {
 	struct tb_port *port;

+	/* No need to restore if the router is already unplugged */
+	if (sw->is_unplugged)
+		return;
+
 	if (tb_enable_tmu(sw))
 		tb_sw_warn(sw, "failed to restore TMU configuration\n");

 	tb_switch_for_each_port(sw, port) {
-		if (!tb_port_has_remote(port))
+		if (!tb_port_has_remote(port) && !port->xdomain)
 			continue;

-		if (tb_switch_lane_bonding_enable(port->remote->sw))
-			dev_warn(&sw->dev, "failed to restore lane bonding\n");
+		if (port->remote) {
+			tb_switch_lane_bonding_enable(port->remote->sw);
+			tb_switch_configure_link(port->remote->sw);

-		tb_restore_children(port->remote->sw);
+			tb_restore_children(port->remote->sw);
+		} else if (port->xdomain) {
+			tb_port_configure_xdomain(port);
+		}
 	}
 }
@@ -1260,7 +1379,7 @@ static int tb_resume_noirq(struct tb *tb)
 	tb_dbg(tb, "resuming...\n");

 	/* remove any pci devices the firmware might have setup */
-	tb_switch_reset(tb, 0);
+	tb_switch_reset(tb->root_switch);

 	tb_switch_resume(tb->root_switch);
 	tb_free_invalid_tunnels(tb);
@@ -1294,6 +1413,7 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw)
 		if (port->xdomain && port->xdomain->is_unplugged) {
 			tb_retimer_remove_all(port);
 			tb_xdomain_remove(port->xdomain);
+			tb_port_unconfigure_xdomain(port);
 			port->xdomain = NULL;
 			ret++;
 		} else if (port->remote) {
@@ -1304,6 +1424,22 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw)
 	return ret;
 }

+static int tb_freeze_noirq(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+
+	tcm->hotplug_active = false;
+	return 0;
+}
+
+static int tb_thaw_noirq(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+
+	tcm->hotplug_active = true;
+	return 0;
+}
+
 static void tb_complete(struct tb *tb)
 {
 	/*
@@ -1317,12 +1453,64 @@ static void tb_complete(struct tb *tb)
 	mutex_unlock(&tb->lock);
 }

+static int tb_runtime_suspend(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+
+	mutex_lock(&tb->lock);
+	tb_switch_suspend(tb->root_switch, true);
+	tcm->hotplug_active = false;
+	mutex_unlock(&tb->lock);
+
+	return 0;
+}
+
+static void tb_remove_work(struct work_struct *work)
+{
+	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
+	struct tb *tb = tcm_to_tb(tcm);
+
+	mutex_lock(&tb->lock);
+	if (tb->root_switch) {
+		tb_free_unplugged_children(tb->root_switch);
+		tb_free_unplugged_xdomains(tb->root_switch);
+	}
+	mutex_unlock(&tb->lock);
+}
+
+static int tb_runtime_resume(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel, *n;
+
+	mutex_lock(&tb->lock);
+	tb_switch_resume(tb->root_switch);
+	tb_free_invalid_tunnels(tb);
+	tb_restore_children(tb->root_switch);
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+		tb_tunnel_restart(tunnel);
+	tcm->hotplug_active = true;
+	mutex_unlock(&tb->lock);
+
+	/*
+	 * Schedule cleanup of any unplugged devices. Run this in a
+	 * separate thread to avoid possible deadlock if the device
+	 * removal runtime resumes the unplugged device.
+	 */
+	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
+	return 0;
+}
+
 static const struct tb_cm_ops tb_cm_ops = {
 	.start = tb_start,
 	.stop = tb_stop,
 	.suspend_noirq = tb_suspend_noirq,
 	.resume_noirq = tb_resume_noirq,
+	.freeze_noirq = tb_freeze_noirq,
+	.thaw_noirq = tb_thaw_noirq,
 	.complete = tb_complete,
+	.runtime_suspend = tb_runtime_suspend,
+	.runtime_resume = tb_runtime_resume,
 	.handle_event = tb_handle_event,
 	.approve_switch = tb_tunnel_pci,
 	.approve_xdomain_paths = tb_approve_xdomain_paths,
@@ -1344,6 +1532,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
 	tcm = tb_priv(tb);
 	INIT_LIST_HEAD(&tcm->tunnel_list);
 	INIT_LIST_HEAD(&tcm->dp_resources);
+	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

 	return tb;
 }

drivers/thunderbolt/tb.h

@@ -125,6 +125,7 @@ struct tb_switch_tmu {
  * @rpm: The switch supports runtime PM
  * @authorized: Whether the switch is authorized by user or policy
  * @security_level: Switch supported security level
+ * @debugfs_dir: Pointer to the debugfs structure
  * @key: Contains the key used to challenge the device or %NULL if not
  *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
  * @connection_id: Connection ID used with ICM messaging
@@ -166,6 +167,7 @@ struct tb_switch {
 	bool rpm;
 	unsigned int authorized;
 	enum tb_security_level security_level;
+	struct dentry *debugfs_dir;
 	u8 *key;
 	u8 connection_id;
 	u8 connection_key;
@@ -333,6 +335,13 @@ struct tb_path {
  */
 #define TB_PATH_MAX_HOPS	(7 * 2)

+/* Possible wake types */
+#define TB_WAKE_ON_CONNECT	BIT(0)
+#define TB_WAKE_ON_DISCONNECT	BIT(1)
+#define TB_WAKE_ON_USB4		BIT(2)
+#define TB_WAKE_ON_USB3		BIT(3)
+#define TB_WAKE_ON_PCIE		BIT(4)
+
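These are connection-manager level flags; a suspend path ORs together whichever wake reasons it wants and hands the mask to the router-specific setter (usb4_switch_set_wake() or tb_lc_set_wake(), both declared further down), with 0 disabling all wakes. A minimal sketch, with example_enable_all_wakes() as a hypothetical name:

static int example_enable_all_wakes(struct tb_switch *sw)
{
	/* Wake on hotplug and on activity of any tunneled protocol. */
	unsigned int flags = TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT |
			     TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 |
			     TB_WAKE_ON_PCIE;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}
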
 /**
  * struct tb_cm_ops - Connection manager specific operations vector
  * @driver_ready: Called right after control channel is started. Used by
@@ -342,6 +351,8 @@ struct tb_path {
  * @suspend_noirq: Connection manager specific suspend_noirq
  * @resume_noirq: Connection manager specific resume_noirq
  * @suspend: Connection manager specific suspend
+ * @freeze_noirq: Connection manager specific freeze_noirq
+ * @thaw_noirq: Connection manager specific thaw_noirq
  * @complete: Connection manager specific complete
  * @runtime_suspend: Connection manager specific runtime_suspend
  * @runtime_resume: Connection manager specific runtime_resume
@@ -364,6 +375,8 @@ struct tb_cm_ops {
 	int (*suspend_noirq)(struct tb *tb);
 	int (*resume_noirq)(struct tb *tb);
 	int (*suspend)(struct tb *tb);
+	int (*freeze_noirq)(struct tb *tb);
+	int (*thaw_noirq)(struct tb *tb);
 	void (*complete)(struct tb *tb);
 	int (*runtime_suspend)(struct tb *tb);
 	int (*runtime_resume)(struct tb *tb);
@@ -457,6 +470,11 @@ static inline bool tb_port_is_null(const struct tb_port *port)
 	return port && port->port && port->config.type == TB_TYPE_PORT;
 }

+static inline bool tb_port_is_nhi(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_NHI;
+}
+
 static inline bool tb_port_is_pcie_down(const struct tb_port *port)
 {
 	return port && port->config.type == TB_TYPE_PCIE_DOWN;
@@ -593,6 +611,8 @@ void tb_domain_remove(struct tb *tb);
 int tb_domain_suspend_noirq(struct tb *tb);
 int tb_domain_resume_noirq(struct tb *tb);
 int tb_domain_suspend(struct tb *tb);
+int tb_domain_freeze_noirq(struct tb *tb);
+int tb_domain_thaw_noirq(struct tb *tb);
 void tb_domain_complete(struct tb *tb);
 int tb_domain_runtime_suspend(struct tb *tb);
 int tb_domain_runtime_resume(struct tb *tb);
@@ -632,9 +652,9 @@ struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
 int tb_switch_configure(struct tb_switch *sw);
 int tb_switch_add(struct tb_switch *sw);
 void tb_switch_remove(struct tb_switch *sw);
-void tb_switch_suspend(struct tb_switch *sw);
+void tb_switch_suspend(struct tb_switch *sw, bool runtime);
 int tb_switch_resume(struct tb_switch *sw);
-int tb_switch_reset(struct tb *tb, u64 route);
+int tb_switch_reset(struct tb_switch *sw);
 void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
 				    enum tb_port_type type);
@@ -685,59 +705,89 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
 static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
 {
-	return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
+	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+	       sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
 }

 static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
 {
-	return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
+	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+	       sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
 }

 static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
 {
-	switch (sw->config.device_id) {
-	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
-	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
-		return true;
-	default:
-		return false;
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+			return true;
+		}
 	}
+	return false;
 }

 static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
 {
-	switch (sw->config.device_id) {
-	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
-	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
-		return true;
-	default:
-		return false;
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+			return true;
+		}
 	}
+	return false;
 }

 static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
 {
-	switch (sw->config.device_id) {
-	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
-	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
-	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
-	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
-		return true;
-	default:
-		return false;
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+			return true;
+		}
 	}
+	return false;
 }

 static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
 {
-	switch (sw->config.device_id) {
-	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
-	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
-	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
-		return true;
-	default:
-		return false;
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+			return true;
+		}
 	}
+	return false;
 }

+static inline bool tb_switch_is_ice_lake(const struct tb_switch *sw)
+{
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+			return true;
+		}
+	}
+	return false;
+}
+
+static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
+{
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+		case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+			return true;
+		}
+	}
+	return false;
+}
 /**
@@ -767,6 +817,8 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw)

 int tb_switch_lane_bonding_enable(struct tb_switch *sw);
 void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+int tb_switch_configure_link(struct tb_switch *sw);
+void tb_switch_unconfigure_link(struct tb_switch *sw);

 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
@@ -788,6 +840,8 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits);
 int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
 int tb_port_unlock(struct tb_port *port);
+int tb_port_enable(struct tb_port *port);
+int tb_port_disable(struct tb_port *port);
 int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
 void tb_port_release_in_hopid(struct tb_port *port, int hopid);
 int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
@@ -811,7 +865,9 @@ int tb_port_get_link_speed(struct tb_port *port);

 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
 int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset);
 int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
+int tb_port_next_cap(struct tb_port *port, unsigned int offset);

 bool tb_port_is_enabled(struct tb_port *port);
 bool tb_usb3_port_is_enabled(struct tb_port *port);
@@ -844,8 +900,11 @@ int tb_drom_read(struct tb_switch *sw);
 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);

 int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
-int tb_lc_configure_link(struct tb_switch *sw);
-void tb_lc_unconfigure_link(struct tb_switch *sw);
+int tb_lc_configure_port(struct tb_port *port);
+void tb_lc_unconfigure_port(struct tb_port *port);
+int tb_lc_configure_xdomain(struct tb_port *port);
+void tb_lc_unconfigure_xdomain(struct tb_port *port);
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
 int tb_lc_set_sleep(struct tb_switch *sw);
 bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
 bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
@@ -900,9 +959,8 @@ int usb4_switch_setup(struct tb_switch *sw);
 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
 			  size_t size);
-int usb4_switch_configure_link(struct tb_switch *sw);
-void usb4_switch_unconfigure_link(struct tb_switch *sw);
 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
 int usb4_switch_set_sleep(struct tb_switch *sw);
 int usb4_switch_nvm_sector_size(struct tb_switch *sw);
 int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
@@ -919,6 +977,10 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
 					  const struct tb_port *port);

 int usb4_port_unlock(struct tb_port *port);
+int usb4_port_configure(struct tb_port *port);
+void usb4_port_unconfigure(struct tb_port *port);
+int usb4_port_configure_xdomain(struct tb_port *port);
+void usb4_port_unconfigure_xdomain(struct tb_port *port);

 int usb4_port_enumerate_retimers(struct tb_port *port);
 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
@@ -945,9 +1007,35 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
 int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
 				     int *downstream_bw);

-/* keep link controller awake during update */
+/* Keep link controller awake during update */
 #define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)

 void tb_check_quirks(struct tb_switch *sw);

+#ifdef CONFIG_ACPI
+void tb_acpi_add_links(struct tb_nhi *nhi);
+#else
+static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void tb_debugfs_init(void);
+void tb_debugfs_exit(void);
+void tb_switch_debugfs_init(struct tb_switch *sw);
+void tb_switch_debugfs_remove(struct tb_switch *sw);
+#else
+static inline void tb_debugfs_init(void) { }
+static inline void tb_debugfs_exit(void) { }
+static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
+static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
+#endif
+
+#ifdef CONFIG_USB4_KUNIT_TEST
+int tb_test_init(void);
+void tb_test_exit(void);
+#else
+static inline int tb_test_init(void) { return 0; }
+static inline void tb_test_exit(void) { }
+#endif
+
 #endif

drivers/thunderbolt/tb_msgs.h

@@ -28,6 +28,7 @@ enum tb_cfg_error {
 	TB_CFG_ERROR_LOOP = 8,
 	TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
 	TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
+	TB_CFG_ERROR_LOCK = 15,
 };

 /* common header */

drivers/thunderbolt/tb_regs.h

@@ -39,6 +39,7 @@ enum tb_switch_vse_cap {

 enum tb_port_cap {
 	TB_PORT_CAP_PHY		= 0x01,
+	TB_PORT_CAP_POWER	= 0x02,
 	TB_PORT_CAP_TIME1	= 0x03,
 	TB_PORT_CAP_ADAP	= 0x04,
 	TB_PORT_CAP_VSE		= 0x05,
@@ -93,6 +94,20 @@ struct tb_cap_extended_long {
 	u16 length;
 } __packed;

+/**
+ * struct tb_cap_any - Structure capable of holding every capability
+ * @basic: Basic capability
+ * @extended_short: Vendor specific capability
+ * @extended_long: Vendor specific extended capability
+ */
+struct tb_cap_any {
+	union {
+		struct tb_cap_basic basic;
+		struct tb_cap_extended_short extended_short;
+		struct tb_cap_extended_long extended_long;
+	};
+} __packed;
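
To show how struct tb_cap_any pairs with the new tb_port_next_cap() helper declared in tb.h, here is a rough sketch of a capability-list walk (a hypothetical example_dump_port_caps(), assuming tb_port_next_cap() returns the offset of the next capability, %0 at the end of the list, or a negative errno):

static void example_dump_port_caps(struct tb_port *port)
{
	int offset = 0;

	for (;;) {
		struct tb_cap_any header;

		offset = tb_port_next_cap(port, offset);
		if (offset <= 0)
			break;

		/* One dword is enough to cover the basic header. */
		if (tb_port_read(port, &header, TB_CFG_PORT, offset, 1))
			break;

		tb_port_dbg(port, "capability %#x at offset %#x\n",
			    header.basic.cap, offset);
	}
}
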
 /* capabilities */

 struct tb_cap_link_controller {
@@ -178,6 +193,8 @@ struct tb_regs_switch_header {
 #define ROUTER_CS_4				0x04
 #define ROUTER_CS_5				0x05
 #define ROUTER_CS_5_SLP				BIT(0)
+#define ROUTER_CS_5_WOP				BIT(1)
+#define ROUTER_CS_5_WOU				BIT(2)
 #define ROUTER_CS_5_C3S				BIT(23)
 #define ROUTER_CS_5_PTO				BIT(24)
 #define ROUTER_CS_5_UTO				BIT(25)
@@ -186,6 +203,8 @@ struct tb_regs_switch_header {
 #define ROUTER_CS_6				0x06
 #define ROUTER_CS_6_SLPR			BIT(0)
 #define ROUTER_CS_6_TNS				BIT(1)
+#define ROUTER_CS_6_WOPS			BIT(2)
+#define ROUTER_CS_6_WOUS			BIT(3)
 #define ROUTER_CS_6_HCI				BIT(18)
 #define ROUTER_CS_6_CR				BIT(25)
 #define ROUTER_CS_7				0x07
@@ -234,7 +253,8 @@ struct tb_regs_port_header {
 	/* DWORD 1 */
 	u32 first_cap_offset:8;
 	u32 max_counters:11;
-	u32 __unknown1:5;
+	u32 counters_support:1;
+	u32 __unknown1:4;
 	u32 revision:8;
 	/* DWORD 2 */
 	enum tb_port_type type:24;
@@ -279,6 +299,7 @@ struct tb_regs_port_header {
 #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT	4
 #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE	0x1
 #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL		0x3
+#define LANE_ADP_CS_1_LD			BIT(14)
 #define LANE_ADP_CS_1_LB			BIT(15)
 #define LANE_ADP_CS_1_CURRENT_SPEED_MASK	GENMASK(19, 16)
 #define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT	16
@@ -301,8 +322,13 @@ struct tb_regs_port_header {
 #define PORT_CS_18				0x12
 #define PORT_CS_18_BE				BIT(8)
 #define PORT_CS_18_TCM				BIT(9)
+#define PORT_CS_18_WOU4S			BIT(18)
 #define PORT_CS_19				0x13
 #define PORT_CS_19_PC				BIT(3)
+#define PORT_CS_19_PID				BIT(4)
+#define PORT_CS_19_WOC				BIT(16)
+#define PORT_CS_19_WOD				BIT(17)
+#define PORT_CS_19_WOU4				BIT(18)

 /* Display Port adapter registers */
 #define ADP_DP_CS_0				0x00
@@ -416,8 +442,14 @@ struct tb_regs_hop {
 #define TB_LC_PORT_ATTR_BE			BIT(12)

 #define TB_LC_SX_CTRL				0x96
+#define TB_LC_SX_CTRL_WOC			BIT(1)
+#define TB_LC_SX_CTRL_WOD			BIT(2)
+#define TB_LC_SX_CTRL_WOU4			BIT(5)
+#define TB_LC_SX_CTRL_WOP			BIT(6)
 #define TB_LC_SX_CTRL_L1C			BIT(16)
+#define TB_LC_SX_CTRL_L1D			BIT(17)
 #define TB_LC_SX_CTRL_L2C			BIT(20)
+#define TB_LC_SX_CTRL_L2D			BIT(21)
 #define TB_LC_SX_CTRL_UPSTREAM			BIT(30)
 #define TB_LC_SX_CTRL_SLP			BIT(31)

drivers/thunderbolt/test.c

@@ -1623,4 +1623,15 @@ static struct kunit_suite tb_test_suite = {
 	.name = "thunderbolt",
 	.test_cases = tb_test_cases,
 };
-kunit_test_suite(tb_test_suite);
+
+static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
+
+int tb_test_init(void)
+{
+	return __kunit_test_suites_init(tb_test_suites);
+}
+
+void tb_test_exit(void)
+{
+	return __kunit_test_suites_exit(tb_test_suites);
+}
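
With kunit_test_suite() gone, the suite no longer registers itself; the driver is expected to call these hooks from its module init/exit paths (they compile to stubs unless CONFIG_USB4_KUNIT_TEST is set). Roughly, under that assumption (example_init/example_exit are placeholder names, not the driver's real entry points):

static int __init example_init(void)
{
	int ret;

	ret = tb_test_init();	/* runs the KUnit suite when enabled */
	if (ret)
		return ret;

	/* ... rest of the driver initialization ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* ... rest of the driver teardown ... */
	tb_test_exit();
}
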

drivers/thunderbolt/usb4.c

@@ -196,6 +196,46 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
 	return 0;
 }

+static void usb4_switch_check_wakes(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	bool wakeup = false;
+	u32 val;
+
+	if (!device_may_wakeup(&sw->dev))
+		return;
+
+	if (tb_route(sw)) {
+		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
+			return;
+
+		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
+			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
+			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
+
+		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
+	}
+
+	/* Check for any connected downstream ports for USB4 wake */
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_has_remote(port))
+			continue;
+
+		if (tb_port_read(port, &val, TB_CFG_PORT,
+				 port->cap_usb4 + PORT_CS_18, 1))
+			break;
+
+		tb_port_dbg(port, "USB4 wake: %s\n",
+			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");
+
+		if (val & PORT_CS_18_WOU4S)
+			wakeup = true;
+	}
+
+	if (wakeup)
+		pm_wakeup_event(&sw->dev, 0);
+}
+
 static bool link_is_usb4(struct tb_port *port)
 {
 	u32 val;
@@ -229,6 +269,8 @@ int usb4_switch_setup(struct tb_switch *sw)
 	u32 val = 0;
 	int ret;

+	usb4_switch_check_wakes(sw);
+
 	if (!tb_route(sw))
 		return 0;

@@ -338,60 +380,6 @@ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
 				  usb4_switch_drom_read_block, sw);
 }

-static int usb4_set_port_configured(struct tb_port *port, bool configured)
-{
-	int ret;
-	u32 val;
-
-	ret = tb_port_read(port, &val, TB_CFG_PORT,
-			   port->cap_usb4 + PORT_CS_19, 1);
-	if (ret)
-		return ret;
-
-	if (configured)
-		val |= PORT_CS_19_PC;
-	else
-		val &= ~PORT_CS_19_PC;
-
-	return tb_port_write(port, &val, TB_CFG_PORT,
-			     port->cap_usb4 + PORT_CS_19, 1);
-}
-
-/**
- * usb4_switch_configure_link() - Set upstream USB4 link configured
- * @sw: USB4 router
- *
- * Sets the upstream USB4 link to be configured for power management
- * purposes.
- */
-int usb4_switch_configure_link(struct tb_switch *sw)
-{
-	struct tb_port *up;
-
-	if (!tb_route(sw))
-		return 0;
-
-	up = tb_upstream_port(sw);
-	return usb4_set_port_configured(up, true);
-}
-
-/**
- * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
- * @sw: USB4 router
- *
- * Reverse of usb4_switch_configure_link().
- */
-void usb4_switch_unconfigure_link(struct tb_switch *sw)
-{
-	struct tb_port *up;
-
-	if (sw->is_unplugged || !tb_route(sw))
-		return;
-
-	up = tb_upstream_port(sw);
-	usb4_set_port_configured(up, false);
-}
-
 /**
  * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
  * @sw: USB4 router
@@ -413,12 +401,78 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
 	return !!(val & PORT_CS_18_BE);
 }

+/**
+ * usb4_switch_set_wake() - Enable/disable wake
+ * @sw: USB4 router
+ * @flags: Wakeup flags (%0 to disable)
+ *
+ * Enables/disables router to wake up from sleep.
+ */
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+	struct tb_port *port;
+	u64 route = tb_route(sw);
+	u32 val;
+	int ret;
+
+	/*
+	 * Enable wakes coming from all USB4 downstream ports (from
+	 * child routers). For device routers do this also for the
+	 * upstream USB4 port.
+	 */
+	tb_switch_for_each_port(sw, port) {
+		if (!route && tb_is_upstream_port(port))
+			continue;
+
+		ret = tb_port_read(port, &val, TB_CFG_PORT,
+				   port->cap_usb4 + PORT_CS_19, 1);
+		if (ret)
+			return ret;
+
+		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
+
+		if (flags & TB_WAKE_ON_CONNECT)
+			val |= PORT_CS_19_WOC;
+		if (flags & TB_WAKE_ON_DISCONNECT)
+			val |= PORT_CS_19_WOD;
+		if (flags & TB_WAKE_ON_USB4)
+			val |= PORT_CS_19_WOU4;
+
+		ret = tb_port_write(port, &val, TB_CFG_PORT,
+				    port->cap_usb4 + PORT_CS_19, 1);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Enable wakes from PCIe and USB 3.x on this router. Only
+	 * needed for device routers.
+	 */
+	if (route) {
+		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+		if (ret)
+			return ret;
+
+		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
+
+		if (flags & TB_WAKE_ON_USB3)
+			val |= ROUTER_CS_5_WOU;
+		if (flags & TB_WAKE_ON_PCIE)
+			val |= ROUTER_CS_5_WOP;
+
+		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * usb4_switch_set_sleep() - Prepare the router to enter sleep
  * @sw: USB4 router
  *
- * Enables wakes and sets sleep bit for the router. Returns when the
- * router sleep ready bit has been asserted.
+ * Sets sleep bit for the router. Returns when the router sleep ready
+ * bit has been asserted.
  */
 int usb4_switch_set_sleep(struct tb_switch *sw)
 {
@@ -795,6 +849,95 @@ int usb4_port_unlock(struct tb_port *port)
 	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
 }

+static int usb4_port_set_configured(struct tb_port *port, bool configured)
+{
+	int ret;
+	u32 val;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	if (configured)
+		val |= PORT_CS_19_PC;
+	else
+		val &= ~PORT_CS_19_PC;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure() - Set USB4 port configured
+ * @port: USB4 port
+ *
+ * Sets the USB4 link to be configured for power management purposes.
+ */
+int usb4_port_configure(struct tb_port *port)
+{
+	return usb4_port_set_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure() - Set USB4 port unconfigured
+ * @port: USB4 port
+ *
+ * Sets the USB4 link to be unconfigured for power management purposes.
+ */
+void usb4_port_unconfigure(struct tb_port *port)
+{
+	usb4_port_set_configured(port, false);
+}
+
+static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
+{
+	int ret;
+	u32 val;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	if (configured)
+		val |= PORT_CS_19_PID;
+	else
+		val &= ~PORT_CS_19_PID;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure_xdomain() - Configure port for XDomain
+ * @port: USB4 port connected to another host
+ *
+ * Marks the USB4 port as being connected to another host. Returns %0 on
+ * success and negative errno on failure.
+ */
+int usb4_port_configure_xdomain(struct tb_port *port)
+{
+	return usb4_set_xdomain_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
+ * @port: USB4 port that was connected to another host
+ *
+ * Clears USB4 port from being marked as XDomain.
+ */
+void usb4_port_unconfigure_xdomain(struct tb_port *port)
+{
+	usb4_set_xdomain_configured(port, false);
+}
+
 static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
 				  u32 value, int timeout_msec)
 {