thunderbolt: Changes for v5.9 merge window
This includes the following Thunderbolt/USB4 changes for the v5.9 merge window:

  * Improvements around NHI (Native Host Interface) HopID allocation
  * Improvements to tunneling and USB3 bandwidth management support
  * Add KUnit tests for path walking and tunneling
  * Initial support for USB4 retimer firmware upgrade
  * Implement Thunderbolt device firmware upgrade mechanism that runs
    the NVM image authentication when the device is disconnected
  * A couple of small non-critical fixes

Merge tag 'thunderbolt-for-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

* tag 'thunderbolt-for-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (32 commits)
  thunderbolt: Fix old style declaration warning
  thunderbolt: Add support for authenticate on disconnect
  thunderbolt: Add support for separating the flush to SPI and authenticate
  thunderbolt: Ensure left shift of 512 does not overflow a 32 bit int
  thunderbolt: Add support for on-board retimers
  thunderbolt: Implement USB4 port sideband operations for retimer access
  thunderbolt: Retry USB4 block read operation
  thunderbolt: Generalize usb4_switch_do_[read|write]_data()
  thunderbolt: Split common NVM functionality into a separate file
  thunderbolt: Add Intel USB-IF ID to the NVM upgrade supported list
  thunderbolt: Add KUnit tests for tunneling
  thunderbolt: Add USB3 bandwidth management
  thunderbolt: Make tb_port_get_link_speed() available to other files
  thunderbolt: Implement USB3 bandwidth negotiation routines
  thunderbolt: Increase DP DPRX wait timeout
  thunderbolt: Report consumed bandwidth in both directions
  thunderbolt: Make usb4_switch_map_pcie_down() also return enabled ports
  thunderbolt: Make usb4_switch_map_usb3_down() also return enabled ports
  thunderbolt: Do not tunnel USB3 if link is not USB4
  thunderbolt: Add DP IN resources for all routers
  ...
commit 6c9a9a8ddf
Documentation/ABI/testing/sysfs-bus-thunderbolt

@@ -178,11 +178,18 @@ KernelVersion:	4.13
 Contact:	thunderbolt-software@lists.01.org
 Description:	When new NVM image is written to the non-active NVM
 		area (through non_activeX NVMem device), the
-		authentication procedure is started by writing 1 to
-		this file. If everything goes well, the device is
+		authentication procedure is started by writing to
+		this file.
+		If everything goes well, the device is
 		restarted with the new NVM firmware. If the image
 		verification fails an error code is returned instead.
 
+		This file will accept writing values "1" or "2"
+		- Writing "1" will flush the image to the storage
+		area and authenticate the image in one action.
+		- Writing "2" will run some basic validation on the image
+		and flush it to the storage area.
+
 		When read holds status of the last authentication
 		operation if an error occurred during the process. This
 		is directly the status value from the DMA configuration
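As an illustration of the two-step flow described above, here is a minimal userspace sketch (not part of the patch); the device name "0-1" is a hypothetical example and the real name depends on the topology:

#include <stdio.h>

int main(void)
{
	/* Hypothetical device path; real names depend on the topology. */
	const char *path = "/sys/bus/thunderbolt/devices/0-1/nvm_authenticate";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	/* "2" would only validate and flush; "1" flushes and authenticates. */
	fputs("1", f);
	return fclose(f) ? 1 : 0;
}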
@@ -236,3 +243,49 @@ KernelVersion:	4.15
 Contact:	thunderbolt-software@lists.01.org
 Description:	This contains XDomain service specific settings as
 		bitmask. Format: %x
+
+What:		/sys/bus/thunderbolt/devices/<device>:<port>.<index>/device
+Date:		Oct 2020
+KernelVersion:	v5.9
+Contact:	Mika Westerberg <mika.westerberg@linux.intel.com>
+Description:	Retimer device identifier read from the hardware.
+
+What:		/sys/bus/thunderbolt/devices/<device>:<port>.<index>/nvm_authenticate
+Date:		Oct 2020
+KernelVersion:	v5.9
+Contact:	Mika Westerberg <mika.westerberg@linux.intel.com>
+Description:	When new NVM image is written to the non-active NVM
+		area (through non_activeX NVMem device), the
+		authentication procedure is started by writing 1 to
+		this file. If everything goes well, the device is
+		restarted with the new NVM firmware. If the image
+		verification fails an error code is returned instead.
+
+		When read holds status of the last authentication
+		operation if an error occurred during the process.
+		Format: %x.
+
+What:		/sys/bus/thunderbolt/devices/<device>:<port>.<index>/nvm_version
+Date:		Oct 2020
+KernelVersion:	v5.9
+Contact:	Mika Westerberg <mika.westerberg@linux.intel.com>
+Description:	Holds retimer NVM version number. Format: %x.%x, major.minor.
+
+What:		/sys/bus/thunderbolt/devices/<device>:<port>.<index>/vendor
+Date:		Oct 2020
+KernelVersion:	v5.9
+Contact:	Mika Westerberg <mika.westerberg@linux.intel.com>
+Description:	Retimer vendor identifier read from the hardware.
+
+What:		/sys/bus/thunderbolt/devices/.../nvm_authenticate_on_disconnect
+Date:		Oct 2020
+KernelVersion:	v5.9
+Contact:	Mario Limonciello <mario.limonciello@dell.com>
+Description:	For supported devices, automatically authenticate the new Thunderbolt
+		image when the device is disconnected from the host system.
+
+		This file will accept writing values "1" or "2"
+		- Writing "1" will flush the image to the storage
+		area and prepare the device for authentication on disconnect.
+		- Writing "2" will run some basic validation on the image
+		and flush it to the storage area.
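For completeness, a small sketch reading one of the new retimer attributes (again not part of the patch; "0-1:1.1" is a hypothetical <device>:<port>.<index> name following the scheme above):

#include <stdio.h>

int main(void)
{
	char ver[16];
	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1:1.1/nvm_version", "r");

	if (!f)
		return 1;
	if (fgets(ver, sizeof(ver), f))
		printf("retimer NVM version: %s", ver);	/* e.g. "36.1" */
	return fclose(f) ? 1 : 0;
}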
Documentation/admin-guide/thunderbolt.rst

@@ -173,8 +173,8 @@ following ``udev`` rule::
 
   ACTION=="add", SUBSYSTEM=="thunderbolt", ATTRS{iommu_dma_protection}=="1", ATTR{authorized}=="0", ATTR{authorized}="1"
 
-Upgrading NVM on Thunderbolt device or host
--------------------------------------------
+Upgrading NVM on Thunderbolt device, host or retimer
+----------------------------------------------------
 Since most of the functionality is handled in firmware running on a
 host controller or a device, it is important that the firmware can be
 upgraded to the latest where possible bugs in it have been fixed.
@@ -185,9 +185,10 @@ for some machines:
 
   `Thunderbolt Updates <https://thunderbolttechnology.net/updates>`_
 
-Before you upgrade firmware on a device or host, please make sure it is a
-suitable upgrade. Failing to do that may render the device (or host) in a
-state where it cannot be used properly anymore without special tools!
+Before you upgrade firmware on a device, host or retimer, please make
+sure it is a suitable upgrade. Failing to do that may render the device
+in a state where it cannot be used properly anymore without special
+tools!
 
 Host NVM upgrade on Apple Macs is not supported.
 
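A minimal sketch of the full upgrade flow this documentation describes, assuming a hypothetical device "0-1" and image "firmware.bin" (the nvmem numbering varies per system; not part of the patch): copy the new image into the non-active NVMem device, then write "1" to nvm_authenticate.

#include <stdio.h>

/* Copy the NVM image into the non-active NVMem device. */
static int copy_image(const char *image, const char *nvmem)
{
	char buf[4096];
	size_t n;
	FILE *src = fopen(image, "rb");
	FILE *dst = fopen(nvmem, "wb");

	if (!src || !dst)
		return -1;
	while ((n = fread(buf, 1, sizeof(buf), src)) > 0)
		if (fwrite(buf, 1, n, dst) != n)
			break;
	fclose(src);
	return fclose(dst);
}

int main(void)
{
	FILE *f;

	/* Both paths are examples only. */
	if (copy_image("firmware.bin",
		       "/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem"))
		return 1;

	/* Writing "1" flushes the cached image and starts authentication. */
	f = fopen("/sys/bus/thunderbolt/devices/0-1/nvm_authenticate", "w");
	if (!f)
		return 1;
	fputs("1", f);
	return fclose(f) ? 1 : 0;
}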
drivers/net/thunderbolt.c

@@ -866,8 +866,8 @@ static int tbnet_open(struct net_device *dev)
 	eof_mask = BIT(TBIP_PDF_FRAME_END);
 
 	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
-				RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask,
-				eof_mask, tbnet_start_poll, net);
+				RING_FLAG_FRAME, sof_mask, eof_mask,
+				tbnet_start_poll, net);
 	if (!ring) {
 		netdev_err(dev, "failed to allocate Rx ring\n");
 		tb_ring_free(net->tx_ring.ring);
drivers/thunderbolt/Kconfig

@@ -8,10 +8,15 @@ menuconfig USB4
 	select CRYPTO_HASH
 	select NVMEM
 	help
-	  USB4 and Thunderbolt driver. USB4 is the public speficiation
-	  based on Thunderbolt 3 protocol. This driver is required if
+	  USB4 and Thunderbolt driver. USB4 is the public specification
+	  based on the Thunderbolt 3 protocol. This driver is required if
 	  you want to hotplug Thunderbolt and USB4 compliant devices on
 	  Apple hardware or on PCs with Intel Falcon Ridge or newer.
 
 	  To compile this driver a module, choose M here. The module will be
 	  called thunderbolt.
+
+config USB4_KUNIT_TEST
+	bool "KUnit tests"
+	depends on KUNIT=y
+	depends on USB4=y
drivers/thunderbolt/Makefile

@@ -2,3 +2,6 @@
 obj-${CONFIG_USB4} := thunderbolt.o
 thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
 thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
+thunderbolt-objs += nvm.o retimer.o quirks.o
+
+obj-${CONFIG_USB4_KUNIT_TEST} += test.o
drivers/thunderbolt/domain.c

@@ -812,6 +812,6 @@ void tb_domain_exit(void)
 {
 	bus_unregister(&tb_bus_type);
 	ida_destroy(&tb_domain_ida);
-	tb_switch_exit();
+	tb_nvm_exit();
 	tb_xdomain_exit();
 }
drivers/thunderbolt/eeprom.c

@@ -599,6 +599,7 @@ parse:
 	sw->uid = header->uid;
 	sw->vendor = header->vendor_id;
 	sw->device = header->model_id;
+	tb_check_quirks(sw);
 
 	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
 	if (crc != header->data_crc32) {
drivers/thunderbolt/lc.c

@@ -366,3 +366,17 @@ int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
 	tb_port_dbg(in, "sink %d de-allocated\n", sink);
 	return 0;
 }
+
+/**
+ * tb_lc_force_power() - Forces LC to be powered on
+ * @sw: Thunderbolt switch
+ *
+ * This is useful to let authentication cycle pass even without
+ * a Thunderbolt link present.
+ */
+int tb_lc_force_power(struct tb_switch *sw)
+{
+	u32 in = 0xffff;
+
+	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
+}
drivers/thunderbolt/nhi.c

@@ -24,12 +24,7 @@
 
 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
 
-/*
- * Used to enable end-to-end workaround for missing RX packets. Do not
- * use this ring for anything else.
- */
-#define RING_E2E_UNUSED_HOPID	2
-#define RING_FIRST_USABLE_HOPID	TB_PATH_MIN_HOPID
+#define RING_FIRST_USABLE_HOPID	1
 
 /*
  * Minimal number of vectors when we use MSI-X. Two for control channel
@@ -440,7 +435,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
 
 	/*
 	 * Automatically allocate HopID from the non-reserved
-	 * range 8 .. hop_count - 1.
+	 * range 1 .. hop_count - 1.
 	 */
 	for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
 		if (ring->is_tx) {
@@ -496,10 +491,6 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
 		transmit ? "TX" : "RX", hop, size);
 
-	/* Tx Ring 2 is reserved for E2E workaround */
-	if (transmit && hop == RING_E2E_UNUSED_HOPID)
-		return NULL;
-
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
 		return NULL;
@@ -614,19 +605,6 @@ void tb_ring_start(struct tb_ring *ring)
 		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
 	}
 
-	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
-		u32 hop;
-
-		/*
-		 * In order not to lose Rx packets we enable end-to-end
-		 * workaround which transfers Rx credits to an unused Tx
-		 * HopID.
-		 */
-		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
-		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
-		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
-	}
-
 	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
 	if (ring->is_tx) {
 		ring_iowrite32desc(ring, ring->size, 12);
@@ -1123,9 +1101,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* cannot fail - table is allocated bin pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
-	if (nhi->hop_count != 12 && nhi->hop_count != 32)
-		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
-			 nhi->hop_count);
+	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
 
 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
drivers/thunderbolt/nvm.c (new file, 170 lines)

// SPDX-License-Identifier: GPL-2.0
/*
 * NVM helpers
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

static DEFINE_IDA(nvm_ida);

/**
 * tb_nvm_alloc() - Allocate new NVM structure
 * @dev: Device owning the NVM
 *
 * Allocates new NVM structure with unique @id and returns it. In case
 * of error returns ERR_PTR().
 */
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(nvm);
		return ERR_PTR(ret);
	}

	nvm->id = ret;
	nvm->dev = dev;

	return nvm;
}

/**
 * tb_nvm_add_active() - Adds active NVMem device to NVM
 * @nvm: NVM structure
 * @size: Size of the active NVM in bytes
 * @reg_read: Pointer to the function to read the NVM (passed directly to the
 *	      NVMem device)
 *
 * Registers new active NVmem device for @nvm. The @reg_read is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_read is @nvm structure.
 * Returns %0 in success and negative errno otherwise.
 */
int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_active";
	config.reg_read = reg_read;
	config.read_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->active = nvmem;
	return 0;
}

/**
 * tb_nvm_write_buf() - Write data to @nvm buffer
 * @nvm: NVM structure
 * @offset: Offset where to write the data
 * @val: Data buffer to write
 * @bytes: Number of bytes to write
 *
 * Helper function to cache the new NVM image before it is actually
 * written to the flash. Copies @bytes from @val to @nvm->buf starting
 * from @offset.
 */
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes)
{
	if (!nvm->buf) {
		nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!nvm->buf)
			return -ENOMEM;
	}

	nvm->flushed = false;
	nvm->buf_data_size = offset + bytes;
	memcpy(nvm->buf + offset, val, bytes);
	return 0;
}

/**
 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
 * @nvm: NVM structure
 * @size: Size of the non-active NVM in bytes
 * @reg_write: Pointer to the function to write the NVM (passed directly
 *	       to the NVMem device)
 *
 * Registers new non-active NVmem device for @nvm. The @reg_write is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_write is @nvm structure.
 * Returns %0 in success and negative errno otherwise.
 */
int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
			  nvmem_reg_write_t reg_write)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_non_active";
	config.reg_write = reg_write;
	config.root_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->non_active = nvmem;
	return 0;
}

/**
 * tb_nvm_free() - Release NVM and its resources
 * @nvm: NVM structure to release
 *
 * Releases NVM and the NVMem devices if they were registered.
 */
void tb_nvm_free(struct tb_nvm *nvm)
{
	if (nvm) {
		if (nvm->non_active)
			nvmem_unregister(nvm->non_active);
		if (nvm->active)
			nvmem_unregister(nvm->active);
		vfree(nvm->buf);
		ida_simple_remove(&nvm_ida, nvm->id);
	}
	kfree(nvm);
}

void tb_nvm_exit(void)
{
	ida_destroy(&nvm_ida);
}
drivers/thunderbolt/path.c

@@ -229,7 +229,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 			      struct tb_port *dst, int dst_hopid, int link_nr,
 			      const char *name)
 {
-	struct tb_port *in_port, *out_port;
+	struct tb_port *in_port, *out_port, *first_port, *last_port;
 	int in_hopid, out_hopid;
 	struct tb_path *path;
 	size_t num_hops;
@@ -239,12 +239,23 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 	if (!path)
 		return NULL;
 
-	/*
-	 * Number of hops on a path is the distance between the two
-	 * switches plus the source adapter port.
-	 */
-	num_hops = abs(tb_route_length(tb_route(src->sw)) -
-		       tb_route_length(tb_route(dst->sw))) + 1;
+	first_port = last_port = NULL;
+	i = 0;
+	tb_for_each_port_on_path(src, dst, in_port) {
+		if (!first_port)
+			first_port = in_port;
+		last_port = in_port;
+		i++;
+	}
+
+	/* Check that src and dst are reachable */
+	if (first_port != src || last_port != dst) {
+		kfree(path);
+		return NULL;
+	}
+
+	/* Each hop takes two ports */
+	num_hops = i / 2;
 
 	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
 	if (!path->hops) {
@@ -559,21 +570,20 @@ bool tb_path_is_invalid(struct tb_path *path)
 }
 
 /**
- * tb_path_switch_on_path() - Does the path go through certain switch
+ * tb_path_port_on_path() - Does the path go through certain port
  * @path: Path to check
- * @sw: Switch to check
+ * @port: Switch to check
  *
- * Goes over all hops on path and checks if @sw is any of them.
+ * Goes over all hops on path and checks if @port is any of them.
  * Direction does not matter.
  */
-bool tb_path_switch_on_path(const struct tb_path *path,
-			    const struct tb_switch *sw)
+bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
 {
 	int i;
 
 	for (i = 0; i < path->path_length; i++) {
-		if (path->hops[i].in_port->sw == sw ||
-		    path->hops[i].out_port->sw == sw)
+		if (path->hops[i].in_port == port ||
+		    path->hops[i].out_port == port)
 			return true;
 	}
 
drivers/thunderbolt/quirks.c (new file, 42 lines)

// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - quirks
 *
 * Copyright (c) 2020 Mario Limonciello <mario.limonciello@dell.com>
 */

#include "tb.h"

static void quirk_force_power_link(struct tb_switch *sw)
{
	sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
}

struct tb_quirk {
	u16 vendor;
	u16 device;
	void (*hook)(struct tb_switch *sw);
};

static const struct tb_quirk tb_quirks[] = {
	/* Dell WD19TB supports self-authentication on unplug */
	{ 0x00d4, 0xb070, quirk_force_power_link },
};

/**
 * tb_check_quirks() - Check for quirks to apply
 * @sw: Thunderbolt switch
 *
 * Apply any quirks for the Thunderbolt controller
 */
void tb_check_quirks(struct tb_switch *sw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) {
		const struct tb_quirk *q = &tb_quirks[i];

		if (sw->device == q->device && sw->vendor == q->vendor)
			q->hook(sw);
	}
}
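The quirk table above is just a (vendor, device) match that fires a hook. A minimal standalone sketch of the same matching logic, compilable outside the kernel (the struct shapes are simplified stand-ins, and only the Dell WD19TB IDs come from the patch):

#include <stdio.h>

struct sw { unsigned short vendor, device, quirks; };
#define QUIRK_FORCE_POWER_LINK_CONTROLLER 0x1

static void quirk_force_power_link(struct sw *sw)
{
	sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
}

struct quirk {
	unsigned short vendor, device;
	void (*hook)(struct sw *sw);
};

static const struct quirk quirks[] = {
	{ 0x00d4, 0xb070, quirk_force_power_link },	/* Dell WD19TB */
};

int main(void)
{
	struct sw sw = { .vendor = 0x00d4, .device = 0xb070 };
	size_t i;

	/* Walk the table and apply the hook on a vendor/device match. */
	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (sw.device == quirks[i].device && sw.vendor == quirks[i].vendor)
			quirks[i].hook(&sw);

	printf("quirks=%#x\n", sw.quirks);	/* prints quirks=0x1 */
	return 0;
}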
drivers/thunderbolt/retimer.c (new file, 485 lines)

// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6

static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
				size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	u32 val, nvm_size;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
					 sizeof(val));
	if (ret)
		goto err_nvm;

	nvm->major = val >> 16;
	nvm->minor = val >> 8;

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
					 &val, sizeof(val));
	if (ret)
		goto err_nvm;

	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;

	ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = rt->nvm->buf;
	u16 ds_size, device;

	image_size = rt->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	buf += hdr_size;
	image_size -= hdr_size;

	return usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					   image_size);
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	bool val;
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		if (!rt->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		ret = tb_retimer_nvm_validate_and_write(rt);
		if (ret)
			goto exit_unlock;

		ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	}

exit_unlock:
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
		return ret;
	}

	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
			     vendor);
		return -EOPNOTSUPP;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->sw->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_del(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->sw->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 *
 * Tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port. Does not scan for cable
 * retimers for now.
 */
int tb_retimer_scan(struct tb_port *port)
{
	u32 status[TB_MAX_RETIMER_INDEX] = {};
	int ret, i, last_idx = 0;

	if (!port->cap_usb4)
		return 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}
	}

	return 0;
}

static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	if (port->cap_usb4)
		device_for_each_child_reverse(&port->sw->dev, port,
					      remove_retimer);
}
drivers/thunderbolt/sb_regs.h (new file, 33 lines)

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * USB4 port sideband registers found on routers and retimers
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#ifndef _SB_REGS
#define _SB_REGS

#define USB4_SB_VENDOR_ID			0x00
#define USB4_SB_PRODUCT_ID			0x01
#define USB4_SB_OPCODE				0x08

enum usb4_sb_opcode {
	USB4_SB_OPCODE_ERR = 0x20525245,			/* "ERR " */
	USB4_SB_OPCODE_ONS = 0x444d4321,			/* "!CMD" */
	USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45,		/* "ENUM" */
	USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c,		/* "LAST" */
	USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47,	/* "GNSS" */
	USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42,		/* "BOPS" */
	USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42,		/* "BLKW" */
	USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541,		/* "AUTH" */
	USB4_SB_OPCODE_NVM_READ = 0x52524641,			/* "AFRR" */
};

#define USB4_SB_METADATA			0x09
#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK	GENMASK(5, 0)
#define USB4_SB_DATA				0x12

#endif
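The opcode values above pack their four-character mnemonics into little-endian 32-bit words, as this small standalone sketch shows:

#include <stdio.h>

int main(void)
{
	unsigned int op = 0x4d554e45;	/* USB4_SB_OPCODE_ENUMERATE_RETIMERS */
	char s[5];
	int i;

	for (i = 0; i < 4; i++)		/* lowest byte is the first character */
		s[i] = (op >> (8 * i)) & 0xff;
	s[4] = '\0';

	printf("%s\n", s);		/* prints "ENUM" */
	return 0;
}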
drivers/thunderbolt/switch.c

@@ -13,21 +13,12 @@
 #include <linux/sched/signal.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 
 #include "tb.h"
 
 /* Switch NVM support */
 
-#define NVM_DEVID		0x05
-#define NVM_VERSION		0x08
 #define NVM_CSS			0x10
-#define NVM_FLASH_SIZE		0x45
-
-#define NVM_MIN_SIZE		SZ_32K
-#define NVM_MAX_SIZE		SZ_512K
-
-static DEFINE_IDA(nvm_ida);
 
 struct nvm_auth_status {
 	struct list_head list;
@@ -35,6 +26,11 @@ struct nvm_auth_status {
 	u32 status;
 };
 
+enum nvm_write_ops {
+	WRITE_AND_AUTHENTICATE = 1,
+	WRITE_ONLY = 2,
+};
+
 /*
  * Hold NVM authentication failure status per switch This information
  * needs to stay around even when the switch gets power cycled so we
@@ -164,8 +160,12 @@ static int nvm_validate_and_write(struct tb_switch *sw)
 	}
 
 	if (tb_switch_is_usb4(sw))
-		return usb4_switch_nvm_write(sw, 0, buf, image_size);
-	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
+	else
+		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+	if (!ret)
+		sw->nvm->flushed = true;
+	return ret;
 }
 
 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
@@ -328,7 +328,8 @@ static int nvm_authenticate(struct tb_switch *sw)
 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
 {
-	struct tb_switch *sw = priv;
+	struct tb_nvm *nvm = priv;
+	struct tb_switch *sw = tb_to_switch(nvm->dev);
 	int ret;
 
 	pm_runtime_get_sync(&sw->dev);
@@ -351,8 +352,9 @@ out:
 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
 {
-	struct tb_switch *sw = priv;
-	int ret = 0;
+	struct tb_nvm *nvm = priv;
+	struct tb_switch *sw = tb_to_switch(nvm->dev);
+	int ret;
 
 	if (!mutex_trylock(&sw->tb->lock))
 		return restart_syscall();
@@ -363,55 +365,15 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 	 * locally here and handle the special cases when the user asks
 	 * us to authenticate the image.
 	 */
-	if (!sw->nvm->buf) {
-		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
-		if (!sw->nvm->buf) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-	}
-
-	sw->nvm->buf_data_size = offset + bytes;
-	memcpy(sw->nvm->buf + offset, val, bytes);
-
-unlock:
+	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
 	mutex_unlock(&sw->tb->lock);
 
 	return ret;
 }
 
-static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
-					   size_t size, bool active)
-{
-	struct nvmem_config config;
-
-	memset(&config, 0, sizeof(config));
-
-	if (active) {
-		config.name = "nvm_active";
-		config.reg_read = tb_switch_nvm_read;
-		config.read_only = true;
-	} else {
-		config.name = "nvm_non_active";
-		config.reg_write = tb_switch_nvm_write;
-		config.root_only = true;
-	}
-
-	config.id = id;
-	config.stride = 4;
-	config.word_size = 4;
-	config.size = size;
-	config.dev = &sw->dev;
-	config.owner = THIS_MODULE;
-	config.priv = sw;
-
-	return nvmem_register(&config);
-}
-
 static int tb_switch_nvm_add(struct tb_switch *sw)
 {
-	struct nvmem_device *nvm_dev;
-	struct tb_switch_nvm *nvm;
+	struct tb_nvm *nvm;
 	u32 val;
 	int ret;
 
@@ -423,18 +385,17 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 	 * currently restrict NVM upgrade for Intel hardware. We may
 	 * relax this in the future when we learn other NVM formats.
 	 */
-	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) {
+	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
+	    sw->config.vendor_id != 0x8087) {
 		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
 		return 0;
 	}
 
-	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
-	if (!nvm)
-		return -ENOMEM;
-
-	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+	nvm = tb_nvm_alloc(&sw->dev);
+	if (IS_ERR(nvm))
+		return PTR_ERR(nvm);
 
 	/*
 	 * If the switch is in safe-mode the only accessible portion of
@@ -446,7 +407,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 
 		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
 		if (ret)
-			goto err_ida;
+			goto err_nvm;
 
 		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
 		nvm_size = (SZ_1M << (val & 7)) / 8;
@@ -454,44 +415,34 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 
 		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
 		if (ret)
-			goto err_ida;
+			goto err_nvm;
 
 		nvm->major = val >> 16;
 		nvm->minor = val >> 8;
 
-		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
-		if (IS_ERR(nvm_dev)) {
-			ret = PTR_ERR(nvm_dev);
-			goto err_ida;
-		}
-		nvm->active = nvm_dev;
+		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
+		if (ret)
+			goto err_nvm;
 	}
 
 	if (!sw->no_nvm_upgrade) {
-		nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
-		if (IS_ERR(nvm_dev)) {
-			ret = PTR_ERR(nvm_dev);
-			goto err_nvm_active;
-		}
-		nvm->non_active = nvm_dev;
+		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
					    tb_switch_nvm_write);
+		if (ret)
+			goto err_nvm;
 	}
 
 	sw->nvm = nvm;
 	return 0;
 
-err_nvm_active:
-	if (nvm->active)
-		nvmem_unregister(nvm->active);
-err_ida:
-	ida_simple_remove(&nvm_ida, nvm->id);
-	kfree(nvm);
-
+err_nvm:
+	tb_nvm_free(nvm);
 	return ret;
 }
 
 static void tb_switch_nvm_remove(struct tb_switch *sw)
 {
-	struct tb_switch_nvm *nvm;
+	struct tb_nvm *nvm;
 
 	nvm = sw->nvm;
 	sw->nvm = NULL;
@@ -503,13 +454,7 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
 	if (!nvm->authenticating)
 		nvm_clear_auth_status(sw);
 
-	if (nvm->non_active)
-		nvmem_unregister(nvm->non_active);
-	if (nvm->active)
-		nvmem_unregister(nvm->active);
-	ida_simple_remove(&nvm_ida, nvm->id);
-	vfree(nvm->buf);
-	kfree(nvm);
+	tb_nvm_free(nvm);
 }
 
 /* port utility functions */
@@ -789,8 +734,11 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 		ida = &port->out_hopids;
 	}
 
-	/* HopIDs 0-7 are reserved */
-	if (min_hopid < TB_PATH_MIN_HOPID)
+	/*
+	 * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
+	 * reserved.
+	 */
+	if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID)
 		min_hopid = TB_PATH_MIN_HOPID;
 
 	if (max_hopid < 0 || max_hopid > port_max_hopid)
@@ -847,6 +795,13 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid)
 	ida_simple_remove(&port->out_hopids, hopid);
 }
 
+static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
+					  const struct tb_switch *sw)
+{
+	u64 mask = (1ULL << parent->config.depth * 8) - 1;
+	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
+}
+
 /**
  * tb_next_port_on_path() - Return next port for given port on a path
  * @start: Start port of the walk
@@ -876,12 +831,12 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 			return end;
 	}
 
-	if (start->sw->config.depth < end->sw->config.depth) {
+	if (tb_switch_is_reachable(prev->sw, end->sw)) {
+		next = tb_port_at(tb_route(end->sw), prev->sw);
+		/* Walk down the topology if next == prev */
 		if (prev->remote &&
-		    prev->remote->sw->config.depth > prev->sw->config.depth)
+		    (next == prev || next->dual_link_port == prev))
 			next = prev->remote;
-		else
-			next = tb_port_at(tb_route(end->sw), prev->sw);
 	} else {
 		if (tb_is_upstream_port(prev)) {
 			next = prev->remote;
@@ -898,10 +853,16 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 		}
 	}
 
-	return next;
+	return next != prev ? next : NULL;
 }
 
-static int tb_port_get_link_speed(struct tb_port *port)
+/**
+ * tb_port_get_link_speed() - Get current link speed
+ * @port: Port to check (USB4 or CIO)
+ *
+ * Returns link speed in Gb/s or negative errno in case of failure.
+ */
+int tb_port_get_link_speed(struct tb_port *port)
 {
 	u32 val, speed;
 	int ret;
@@ -1532,11 +1493,11 @@ static ssize_t nvm_authenticate_show(struct device *dev,
 	return sprintf(buf, "%#x\n", status);
 }
 
-static ssize_t nvm_authenticate_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
 {
 	struct tb_switch *sw = tb_to_switch(dev);
-	bool val;
+	int val;
 	int ret;
 
 	pm_runtime_get_sync(&sw->dev);
@@ -1552,25 +1513,32 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 		goto exit_unlock;
 	}
 
-	ret = kstrtobool(buf, &val);
+	ret = kstrtoint(buf, 10, &val);
 	if (ret)
 		goto exit_unlock;
 
 	/* Always clear the authentication status */
 	nvm_clear_auth_status(sw);
 
-	if (val) {
-		if (!sw->nvm->buf) {
-			ret = -EINVAL;
-			goto exit_unlock;
+	if (val > 0) {
+		if (!sw->nvm->flushed) {
+			if (!sw->nvm->buf) {
+				ret = -EINVAL;
+				goto exit_unlock;
+			}
+
+			ret = nvm_validate_and_write(sw);
+			if (ret || val == WRITE_ONLY)
+				goto exit_unlock;
+		}
+		if (val == WRITE_AND_AUTHENTICATE) {
+			if (disconnect) {
+				ret = tb_lc_force_power(sw);
+			} else {
+				sw->nvm->authenticating = true;
+				ret = nvm_authenticate(sw);
+			}
 		}
-
-		ret = nvm_validate_and_write(sw);
-		if (ret)
-			goto exit_unlock;
-
-		sw->nvm->authenticating = true;
-		ret = nvm_authenticate(sw);
 	}
 
 exit_unlock:
@@ -1579,12 +1547,35 @@ exit_rpm:
 	pm_runtime_mark_last_busy(&sw->dev);
 	pm_runtime_put_autosuspend(&sw->dev);
 
+	return ret;
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret = nvm_authenticate_sysfs(dev, buf, false);
 	if (ret)
 		return ret;
 	return count;
 }
 static DEVICE_ATTR_RW(nvm_authenticate);
 
+static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return nvm_authenticate_show(dev, attr, buf);
+}
+
+static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+
+	ret = nvm_authenticate_sysfs(dev, buf, true);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
+
 static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
 {
@@ -1642,6 +1633,7 @@ static struct attribute *switch_attrs[] = {
 	&dev_attr_generation.attr,
 	&dev_attr_key.attr,
 	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_authenticate_on_disconnect.attr,
 	&dev_attr_nvm_version.attr,
 	&dev_attr_rx_speed.attr,
 	&dev_attr_rx_lanes.attr,
@@ -1696,6 +1688,10 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 		if (tb_route(sw))
 			return attr->mode;
 		return 0;
+	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
+		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
+			return attr->mode;
+		return 0;
 	}
 
 	return sw->safe_mode ? 0 : attr->mode;
@@ -2440,6 +2436,9 @@ void tb_switch_remove(struct tb_switch *sw)
 			tb_xdomain_remove(port->xdomain);
 			port->xdomain = NULL;
 		}
+
+		/* Remove any downstream retimers */
+		tb_retimer_remove_all(port);
 	}
 
 	if (!sw->is_unplugged)
@@ -2755,8 +2754,3 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
 
 	return NULL;
 }
-
-void tb_switch_exit(void)
-{
-	ida_destroy(&nvm_ida);
-}
drivers/thunderbolt/tb.c

@@ -206,27 +206,197 @@ static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
 }
 
 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
-					 const struct tb_port *port)
+					const struct tb_port *port)
 {
 	struct tb_port *down;
 
 	down = usb4_switch_map_usb3_down(sw, port);
-	if (down) {
-		if (WARN_ON(!tb_port_is_usb3_down(down)))
-			goto out;
-		if (WARN_ON(tb_usb3_port_is_enabled(down)))
-			goto out;
-
+	if (down && !tb_usb3_port_is_enabled(down))
 		return down;
-	}
-
-out:
-	return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
+	return NULL;
+}
+
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+					struct tb_port *src_port,
+					struct tb_port *dst_port)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+		if (tunnel->type == type &&
+		    ((src_port && src_port == tunnel->src_port) ||
+		     (dst_port && dst_port == tunnel->dst_port))) {
+			return tunnel;
+		}
+	}
+
+	return NULL;
+}
+
+static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
+						   struct tb_port *src_port,
+						   struct tb_port *dst_port)
+{
+	struct tb_port *port, *usb3_down;
+	struct tb_switch *sw;
+
+	/* Pick the router that is deepest in the topology */
+	if (dst_port->sw->config.depth > src_port->sw->config.depth)
+		sw = dst_port->sw;
+	else
+		sw = src_port->sw;
+
+	/* Can't be the host router */
+	if (sw == tb->root_switch)
+		return NULL;
+
+	/* Find the downstream USB4 port that leads to this router */
+	port = tb_port_at(tb_route(sw), tb->root_switch);
+	/* Find the corresponding host router USB3 downstream port */
+	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
+	if (!usb3_down)
+		return NULL;
+
+	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
+}
+
+static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+	struct tb_port *dst_port, int *available_up, int *available_down)
+{
+	int usb3_consumed_up, usb3_consumed_down, ret;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+	struct tb_port *port;
+
+	tb_port_dbg(dst_port, "calculating available bandwidth\n");
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	if (tunnel) {
+		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
+						   &usb3_consumed_down);
+		if (ret)
+			return ret;
+	} else {
+		usb3_consumed_up = 0;
+		usb3_consumed_down = 0;
+	}
+
+	*available_up = *available_down = 40000;
+
+	/* Find the minimum available bandwidth over all links */
+	tb_for_each_port_on_path(src_port, dst_port, port) {
+		int link_speed, link_width, up_bw, down_bw;
+
+		if (!tb_port_is_null(port))
+			continue;
+
+		if (tb_is_upstream_port(port)) {
+			link_speed = port->sw->link_speed;
+		} else {
+			link_speed = tb_port_get_link_speed(port);
+			if (link_speed < 0)
+				return link_speed;
+		}
+
+		link_width = port->bonded ? 2 : 1;
+
+		up_bw = link_speed * link_width * 1000; /* Mb/s */
+		/* Leave 10% guard band */
+		up_bw -= up_bw / 10;
+		down_bw = up_bw;
+
+		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
+
+		/*
+		 * Find all DP tunnels that cross the port and reduce
+		 * their consumed bandwidth from the available.
+		 */
+		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+			int dp_consumed_up, dp_consumed_down;
+
+			if (!tb_tunnel_is_dp(tunnel))
+				continue;
+
+			if (!tb_tunnel_port_on_path(tunnel, port))
+				continue;
+
+			ret = tb_tunnel_consumed_bandwidth(tunnel,
+							   &dp_consumed_up,
+							   &dp_consumed_down);
+			if (ret)
+				return ret;
+
+			up_bw -= dp_consumed_up;
+			down_bw -= dp_consumed_down;
+		}
+
+		/*
+		 * If USB3 is tunneled from the host router down to the
+		 * branch leading to port we need to take USB3 consumed
+		 * bandwidth into account regardless whether it actually
+		 * crosses the port.
+		 */
+		up_bw -= usb3_consumed_up;
+		down_bw -= usb3_consumed_down;
+
+		if (up_bw < *available_up)
+			*available_up = up_bw;
+		if (down_bw < *available_down)
+			*available_down = down_bw;
+	}
+
+	if (*available_up < 0)
+		*available_up = 0;
+	if (*available_down < 0)
+		*available_down = 0;
+
+	return 0;
+}
+
+static int tb_release_unused_usb3_bandwidth(struct tb *tb,
+					    struct tb_port *src_port,
+					    struct tb_port *dst_port)
+{
+	struct tb_tunnel *tunnel;
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
+}
+
+static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
+				      struct tb_port *dst_port)
+{
+	int ret, available_up, available_down;
+	struct tb_tunnel *tunnel;
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	if (!tunnel)
+		return;
+
+	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
+
+	/*
+	 * Calculate available bandwidth for the first hop USB3 tunnel.
+	 * That determines the whole USB3 bandwidth for this branch.
+	 */
+	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
+				     &available_up, &available_down);
+	if (ret) {
+		tb_warn(tb, "failed to calculate available bandwidth\n");
+		return;
+	}
+
+	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
+	       available_up, available_down);
+
+	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
 }
 
 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
 {
 	struct tb_switch *parent = tb_switch_parent(sw);
+	int ret, available_up, available_down;
 	struct tb_port *up, *down, *port;
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_tunnel *tunnel;
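To make the arithmetic in tb_available_bandwidth() above concrete: a bonded (width 2) 20 Gb/s link yields 20 * 2 * 1000 = 40000 Mb/s; the 10% guard band leaves 36000 Mb/s, from which the consumed bandwidth of any DP tunnels crossing the port and the branch's USB3 consumption are then subtracted.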
@@ -235,6 +405,9 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
 	if (!up)
 		return 0;
 
+	if (!sw->link_usb4)
+		return 0;
+
 	/*
 	 * Look up available down port. Since we are chaining it should
 	 * be found right above this switch.
@@ -254,21 +427,48 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
 		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
 		if (!parent_up || !tb_port_is_enabled(parent_up))
 			return 0;
+
+		/* Make all unused bandwidth available for the new tunnel */
+		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
+		if (ret)
+			return ret;
 	}
 
-	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
-	if (!tunnel)
-		return -ENOMEM;
+	ret = tb_available_bandwidth(tb, down, up, &available_up,
+				     &available_down);
+	if (ret)
+		goto err_reclaim;
+
+	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
+		    available_up, available_down);
+
+	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
+				      available_down);
+	if (!tunnel) {
+		ret = -ENOMEM;
+		goto err_reclaim;
+	}
 
 	if (tb_tunnel_activate(tunnel)) {
 		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
-		tb_tunnel_free(tunnel);
-		return -EIO;
+		ret = -EIO;
+		goto err_free;
 	}
 
 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	if (tb_route(parent))
+		tb_reclaim_usb3_bandwidth(tb, down, up);
+
 	return 0;
+
+err_free:
+	tb_tunnel_free(tunnel);
+err_reclaim:
+	if (tb_route(parent))
+		tb_reclaim_usb3_bandwidth(tb, down, up);
+
+	return ret;
 }
 
 static int tb_create_usb3_tunnels(struct tb_switch *sw)
@@ -339,6 +539,9 @@ static void tb_scan_port(struct tb_port *port)
 		tb_port_dbg(port, "port already has a remote\n");
 		return;
 	}
+
+	tb_retimer_scan(port);
+
 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
 	if (IS_ERR(sw)) {
@@ -395,6 +598,9 @@ static void tb_scan_port(struct tb_port *port)
 	if (tb_enable_tmu(sw))
 		tb_sw_warn(sw, "failed to enable TMU\n");
 
+	/* Scan upstream retimers */
+	tb_retimer_scan(upstream_port);
+
 	/*
 	 * Create USB 3.x tunnels only when the switch is plugged to the
 	 * domain. This is because we scan the domain also during discovery
@@ -404,43 +610,44 @@ static void tb_scan_port(struct tb_port *port)
 	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
 		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
 
+	tb_add_dp_resources(sw);
 	tb_scan_switch(sw);
 }
 
-static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
-					struct tb_port *src_port,
-					struct tb_port *dst_port)
-{
-	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_tunnel *tunnel;
-
-	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
-		if (tunnel->type == type &&
-		    ((src_port && src_port == tunnel->src_port) ||
-		     (dst_port && dst_port == tunnel->dst_port))) {
-			return tunnel;
-		}
-	}
-
-	return NULL;
-}
-
 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
 {
 	struct tb_port *src_port, *dst_port;
 	struct tb *tb;
 
 	if (!tunnel)
 		return;
 
 	tb_tunnel_deactivate(tunnel);
|
||||
list_del(&tunnel->list);
|
||||
|
||||
/*
|
||||
* In case of DP tunnel make sure the DP IN resource is deallocated
|
||||
* properly.
|
||||
*/
|
||||
if (tb_tunnel_is_dp(tunnel)) {
|
||||
struct tb_port *in = tunnel->src_port;
|
||||
tb = tunnel->tb;
|
||||
src_port = tunnel->src_port;
|
||||
dst_port = tunnel->dst_port;
|
||||
|
||||
tb_switch_dealloc_dp_resource(in->sw, in);
|
||||
switch (tunnel->type) {
|
||||
case TB_TUNNEL_DP:
|
||||
/*
|
||||
* In case of DP tunnel make sure the DP IN resource is
|
||||
* deallocated properly.
|
||||
*/
|
||||
tb_switch_dealloc_dp_resource(src_port->sw, src_port);
|
||||
fallthrough;
|
||||
|
||||
case TB_TUNNEL_USB3:
|
||||
tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
|
||||
break;
|
||||
|
||||
default:
|
||||
/*
|
||||
* PCIe and DMA tunnels do not consume guaranteed
|
||||
* bandwidth.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
tb_tunnel_free(tunnel);
|
||||
@ -473,6 +680,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
|
||||
continue;
|
||||
|
||||
if (port->remote->sw->is_unplugged) {
|
||||
tb_retimer_remove_all(port);
|
||||
tb_remove_dp_resources(port->remote->sw);
|
||||
tb_switch_lane_bonding_disable(port->remote->sw);
|
||||
tb_switch_remove(port->remote->sw);
|
||||
@ -524,7 +732,7 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
|
||||
if (down) {
|
||||
if (WARN_ON(!tb_port_is_pcie_down(down)))
|
||||
goto out;
|
||||
if (WARN_ON(tb_pci_port_is_enabled(down)))
|
||||
if (tb_pci_port_is_enabled(down))
|
||||
goto out;
|
||||
|
||||
return down;
|
||||
@ -534,51 +742,49 @@ out:
|
||||
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
|
||||
}
|
||||
|
||||
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
|
||||
struct tb_port *out)
|
||||
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
|
||||
{
|
||||
struct tb_switch *sw = out->sw;
|
||||
struct tb_tunnel *tunnel;
|
||||
int bw, available_bw = 40000;
|
||||
struct tb_port *host_port, *port;
|
||||
struct tb_cm *tcm = tb_priv(tb);
|
||||
|
||||
while (sw && sw != in->sw) {
|
||||
bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
|
||||
/* Leave 10% guard band */
|
||||
bw -= bw / 10;
|
||||
host_port = tb_route(in->sw) ?
|
||||
tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
|
||||
|
||||
/*
|
||||
* Check for any active DP tunnels that go through this
|
||||
* switch and reduce their consumed bandwidth from
|
||||
* available.
|
||||
*/
|
||||
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
|
||||
int consumed_bw;
|
||||
list_for_each_entry(port, &tcm->dp_resources, list) {
|
||||
if (!tb_port_is_dpout(port))
|
||||
continue;
|
||||
|
||||
if (!tb_tunnel_switch_on_path(tunnel, sw))
|
||||
continue;
|
||||
|
||||
consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
|
||||
if (consumed_bw < 0)
|
||||
return consumed_bw;
|
||||
|
||||
bw -= consumed_bw;
|
||||
if (tb_port_is_enabled(port)) {
|
||||
tb_port_dbg(port, "in use\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (bw < available_bw)
|
||||
available_bw = bw;
|
||||
tb_port_dbg(port, "DP OUT available\n");
|
||||
|
||||
sw = tb_switch_parent(sw);
|
||||
/*
|
||||
* Keep the DP tunnel under the topology starting from
|
||||
* the same host router downstream port.
|
||||
*/
|
||||
if (host_port && tb_route(port->sw)) {
|
||||
struct tb_port *p;
|
||||
|
||||
p = tb_port_at(tb_route(port->sw), tb->root_switch);
|
||||
if (p != host_port)
|
||||
continue;
|
||||
}
|
||||
|
||||
return port;
|
||||
}
|
||||
|
||||
return available_bw;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void tb_tunnel_dp(struct tb *tb)
|
||||
{
|
||||
int available_up, available_down, ret;
|
||||
struct tb_cm *tcm = tb_priv(tb);
|
||||
struct tb_port *port, *in, *out;
|
||||
struct tb_tunnel *tunnel;
|
||||
int available_bw;
|
||||
|
||||
/*
|
||||
* Find pair of inactive DP IN and DP OUT adapters and then
|
||||
@ -589,17 +795,21 @@ static void tb_tunnel_dp(struct tb *tb)
|
||||
in = NULL;
|
||||
out = NULL;
|
||||
list_for_each_entry(port, &tcm->dp_resources, list) {
|
||||
if (!tb_port_is_dpin(port))
|
||||
continue;
|
||||
|
||||
if (tb_port_is_enabled(port)) {
|
||||
tb_port_dbg(port, "in use\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
tb_port_dbg(port, "available\n");
|
||||
tb_port_dbg(port, "DP IN available\n");
|
||||
|
||||
if (!in && tb_port_is_dpin(port))
|
||||
out = tb_find_dp_out(tb, port);
|
||||
if (out) {
|
||||
in = port;
|
||||
else if (!out && tb_port_is_dpout(port))
|
||||
out = port;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!in) {
|
||||
@ -616,32 +826,41 @@ static void tb_tunnel_dp(struct tb *tb)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Calculate available bandwidth between in and out */
|
||||
available_bw = tb_available_bw(tcm, in, out);
|
||||
if (available_bw < 0) {
|
||||
tb_warn(tb, "failed to determine available bandwidth\n");
|
||||
return;
|
||||
/* Make all unused USB3 bandwidth available for the new DP tunnel */
|
||||
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
|
||||
if (ret) {
|
||||
tb_warn(tb, "failed to release unused bandwidth\n");
|
||||
goto err_dealloc_dp;
|
||||
}
|
||||
|
||||
tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
|
||||
available_bw);
|
||||
ret = tb_available_bandwidth(tb, in, out, &available_up,
|
||||
&available_down);
|
||||
if (ret)
|
||||
goto err_reclaim;
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
|
||||
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
|
||||
available_up, available_down);
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
|
||||
if (!tunnel) {
|
||||
tb_port_dbg(out, "could not allocate DP tunnel\n");
|
||||
goto dealloc_dp;
|
||||
goto err_reclaim;
|
||||
}
|
||||
|
||||
if (tb_tunnel_activate(tunnel)) {
|
||||
tb_port_info(out, "DP tunnel activation failed, aborting\n");
|
||||
tb_tunnel_free(tunnel);
|
||||
goto dealloc_dp;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
list_add_tail(&tunnel->list, &tcm->tunnel_list);
|
||||
tb_reclaim_usb3_bandwidth(tb, in, out);
|
||||
return;
|
||||
|
||||
dealloc_dp:
|
||||
err_free:
|
||||
tb_tunnel_free(tunnel);
|
||||
err_reclaim:
|
||||
tb_reclaim_usb3_bandwidth(tb, in, out);
|
||||
err_dealloc_dp:
|
||||
tb_switch_dealloc_dp_resource(in->sw, in);
|
||||
}
|
||||
|
||||
@ -827,6 +1046,8 @@ static void tb_handle_hotplug(struct work_struct *work)
|
||||
goto put_sw;
|
||||
}
|
||||
if (ev->unplug) {
|
||||
tb_retimer_remove_all(port);
|
||||
|
||||
if (tb_port_has_remote(port)) {
|
||||
tb_port_dbg(port, "switch unplugged\n");
|
||||
tb_sw_set_unplugged(port->remote->sw);
|
||||
@ -1071,6 +1292,7 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw)
|
||||
if (tb_is_upstream_port(port))
|
||||
continue;
|
||||
if (port->xdomain && port->xdomain->is_unplugged) {
|
||||
tb_retimer_remove_all(port);
|
||||
tb_xdomain_remove(port->xdomain);
|
||||
port->xdomain = NULL;
|
||||
ret++;
|
||||
|
@ -18,8 +18,17 @@
|
||||
#include "ctl.h"
|
||||
#include "dma_port.h"
|
||||
|
||||
#define NVM_MIN_SIZE SZ_32K
|
||||
#define NVM_MAX_SIZE SZ_512K
|
||||
|
||||
/* Intel specific NVM offsets */
|
||||
#define NVM_DEVID 0x05
|
||||
#define NVM_VERSION 0x08
|
||||
#define NVM_FLASH_SIZE 0x45
|
||||
|
||||
/**
|
||||
* struct tb_switch_nvm - Structure holding switch NVM information
|
||||
* struct tb_nvm - Structure holding NVM information
|
||||
* @dev: Owner of the NVM
|
||||
* @major: Major version number of the active NVM portion
|
||||
* @minor: Minor version number of the active NVM portion
|
||||
* @id: Identifier used with both NVM portions
|
||||
@ -29,9 +38,14 @@
|
||||
* the actual NVM flash device
|
||||
* @buf_data_size: Number of bytes actually consumed by the new NVM
|
||||
* image
|
||||
* @authenticating: The switch is authenticating the new NVM
|
||||
* @authenticating: The device is authenticating the new NVM
|
||||
* @flushed: The image has been flushed to the storage area
|
||||
*
|
||||
* The user of this structure needs to handle serialization of possible
|
||||
* concurrent access.
|
||||
*/
|
||||
struct tb_switch_nvm {
|
||||
struct tb_nvm {
|
||||
struct device *dev;
|
||||
u8 major;
|
||||
u8 minor;
|
||||
int id;
|
||||
@ -40,6 +54,7 @@ struct tb_switch_nvm {
|
||||
void *buf;
|
||||
size_t buf_data_size;
|
||||
bool authenticating;
|
||||
bool flushed;
|
||||
};
|
||||
|
||||
#define TB_SWITCH_KEY_SIZE 32
|
||||
@ -97,6 +112,7 @@ struct tb_switch_tmu {
|
||||
* @device_name: Name of the device (or %NULL if not known)
|
||||
* @link_speed: Speed of the link in Gb/s
|
||||
* @link_width: Width of the link (1 or 2)
|
||||
* @link_usb4: Upstream link is USB4
|
||||
* @generation: Switch Thunderbolt generation
|
||||
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
|
||||
* @cap_lc: Offset to the link controller capability (%0 if not found)
|
||||
@ -117,6 +133,7 @@ struct tb_switch_tmu {
|
||||
* @depth: Depth in the chain this switch is connected (ICM only)
|
||||
* @rpm_complete: Completion used to wait for runtime resume to
|
||||
* complete (ICM only)
|
||||
* @quirks: Quirks used for this Thunderbolt switch
|
||||
*
|
||||
* When the switch is being added or removed to the domain (other
|
||||
* switches) you need to have domain lock held.
|
||||
@ -136,12 +153,13 @@ struct tb_switch {
|
||||
const char *device_name;
|
||||
unsigned int link_speed;
|
||||
unsigned int link_width;
|
||||
bool link_usb4;
|
||||
unsigned int generation;
|
||||
int cap_plug_events;
|
||||
int cap_lc;
|
||||
bool is_unplugged;
|
||||
u8 *drom;
|
||||
struct tb_switch_nvm *nvm;
|
||||
struct tb_nvm *nvm;
|
||||
bool no_nvm_upgrade;
|
||||
bool safe_mode;
|
||||
bool boot;
|
||||
@ -154,6 +172,7 @@ struct tb_switch {
|
||||
u8 link;
|
||||
u8 depth;
|
||||
struct completion rpm_complete;
|
||||
unsigned long quirks;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -195,6 +214,28 @@ struct tb_port {
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/**
|
||||
* tb_retimer: Thunderbolt retimer
|
||||
* @dev: Device for the retimer
|
||||
* @tb: Pointer to the domain the retimer belongs to
|
||||
* @index: Retimer index facing the router USB4 port
|
||||
* @vendor: Vendor ID of the retimer
|
||||
* @device: Device ID of the retimer
|
||||
* @port: Pointer to the lane 0 adapter
|
||||
* @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
|
||||
* @auth_status: Status of last NVM authentication
|
||||
*/
|
||||
struct tb_retimer {
|
||||
struct device dev;
|
||||
struct tb *tb;
|
||||
u8 index;
|
||||
u32 vendor;
|
||||
u32 device;
|
||||
struct tb_port *port;
|
||||
struct tb_nvm *nvm;
|
||||
u32 auth_status;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct tb_path_hop - routing information for a tb_path
|
||||
* @in_port: Ingress port of a switch
|
||||
@ -286,7 +327,11 @@ struct tb_path {
|
||||
|
||||
/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
|
||||
#define TB_PATH_MIN_HOPID 8
|
||||
#define TB_PATH_MAX_HOPS 7
|
||||
/*
|
||||
* Support paths from the farthest (depth 6) router to the host and back
|
||||
* to the same level (not necessarily to the same router).
|
||||
*/
|
||||
#define TB_PATH_MAX_HOPS (7 * 2)
|
||||
|
||||
/**
|
||||
* struct tb_cm_ops - Connection manager specific operations vector
|
||||
@ -534,11 +579,11 @@ struct tb *icm_probe(struct tb_nhi *nhi);
|
||||
struct tb *tb_probe(struct tb_nhi *nhi);
|
||||
|
||||
extern struct device_type tb_domain_type;
|
||||
extern struct device_type tb_retimer_type;
|
||||
extern struct device_type tb_switch_type;
|
||||
|
||||
int tb_domain_init(void);
|
||||
void tb_domain_exit(void);
|
||||
void tb_switch_exit(void);
|
||||
int tb_xdomain_init(void);
|
||||
void tb_xdomain_exit(void);
|
||||
|
||||
@ -571,6 +616,15 @@ static inline void tb_domain_put(struct tb *tb)
|
||||
put_device(&tb->dev);
|
||||
}
|
||||
|
||||
struct tb_nvm *tb_nvm_alloc(struct device *dev);
|
||||
int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
|
||||
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
|
||||
size_t bytes);
|
||||
int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
|
||||
nvmem_reg_write_t reg_write);
|
||||
void tb_nvm_free(struct tb_nvm *nvm);
|
||||
void tb_nvm_exit(void);
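
These helpers split the NVM handling into a common facility shared by routers and retimers. A hedged sketch of the intended registration flow (error handling shortened; my_reg_read and my_reg_write are hypothetical nvmem callbacks, and tb_nvm_alloc() is assumed to follow the usual ERR_PTR() convention):

	static struct tb_nvm *register_nvm(struct device *dev,
					   size_t active_size,
					   size_t non_active_size)
	{
		struct tb_nvm *nvm;

		nvm = tb_nvm_alloc(dev);
		if (IS_ERR(nvm))
			return nvm;

		/* Expose the active NVM portion read-only */
		if (tb_nvm_add_active(nvm, active_size, my_reg_read))
			goto err_free;

		/* Writes to the non-active portion are buffered */
		if (tb_nvm_add_non_active(nvm, non_active_size, my_reg_write))
			goto err_free;

		return nvm;

	err_free:
		tb_nvm_free(nvm);
		return ERR_PTR(-ENODEV);
	}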

struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
@ -741,6 +795,20 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev);

/**
 * tb_for_each_port_on_path() - Iterate over each port on path
 * @src: Source port
 * @dst: Destination port
 * @p: Port used as iterator
 *
 * Walks over each port on path from @src to @dst.
 */
#define tb_for_each_port_on_path(src, dst, p)				\
	for ((p) = tb_next_port_on_path((src), (dst), NULL); (p);	\
	     (p) = tb_next_port_on_path((src), (dst), (p)))
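
A minimal sketch of the iteration pattern the macro enables (the helper name is hypothetical; tb_port_is_null() tests for a lane adapter):

	static int count_lane_adapters(struct tb_port *src, struct tb_port *dst)
	{
		struct tb_port *p;
		int count = 0;

		tb_for_each_port_on_path(src, dst, p) {
			if (tb_port_is_null(p))
				count++;
		}

		return count;
	}

tb_available_bandwidth() earlier in this series walks paths exactly this way when it computes the minimum link bandwidth between two ports.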

int tb_port_get_link_speed(struct tb_port *port);

int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
@ -769,8 +837,8 @@ void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_switch_on_path(const struct tb_path *path,
			    const struct tb_switch *sw);
bool tb_path_port_on_path(const struct tb_path *path,
			  const struct tb_port *port);

int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
@ -783,6 +851,7 @@ bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
int tb_lc_force_power(struct tb_switch *sw);

static inline int tb_route_length(u64 route)
{
@ -812,6 +881,21 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth);

int tb_retimer_scan(struct tb_port *port);
void tb_retimer_remove_all(struct tb_port *port);

static inline bool tb_is_retimer(const struct device *dev)
{
	return dev->type == &tb_retimer_type;
}

static inline struct tb_retimer *tb_to_retimer(struct device *dev)
{
	if (tb_is_retimer(dev))
		return container_of(dev, struct tb_retimer, dev);
	return NULL;
}

int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
@ -835,4 +919,35 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port);

int usb4_port_unlock(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);

int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
				unsigned int address, const void *buf,
				size_t size);
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status);
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size);

int usb4_usb3_port_max_link_rate(struct tb_port *port);
int usb4_usb3_port_actual_link_rate(struct tb_port *port);
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw);
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw);
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw);

/* keep link controller awake during update */
#define QUIRK_FORCE_POWER_LINK_CONTROLLER	BIT(0)

void tb_check_quirks(struct tb_switch *sw);

#endif

drivers/thunderbolt/tb_regs.h
@ -288,8 +288,19 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT	20

/* USB4 port registers */
#define PORT_CS_1				0x01
#define PORT_CS_1_LENGTH_SHIFT			8
#define PORT_CS_1_TARGET_MASK			GENMASK(18, 16)
#define PORT_CS_1_TARGET_SHIFT			16
#define PORT_CS_1_RETIMER_INDEX_SHIFT		20
#define PORT_CS_1_WNR_WRITE			BIT(24)
#define PORT_CS_1_NR				BIT(25)
#define PORT_CS_1_RC				BIT(26)
#define PORT_CS_1_PND				BIT(31)
#define PORT_CS_2				0x02
#define PORT_CS_18				0x12
#define PORT_CS_18_BE				BIT(8)
#define PORT_CS_18_TCM				BIT(9)
#define PORT_CS_19				0x13
#define PORT_CS_19_PC				BIT(3)

@ -337,6 +348,25 @@ struct tb_regs_port_header {
#define ADP_USB3_CS_0				0x00
#define ADP_USB3_CS_0_V				BIT(30)
#define ADP_USB3_CS_0_PE			BIT(31)
#define ADP_USB3_CS_1				0x01
#define ADP_USB3_CS_1_CUBW_MASK			GENMASK(11, 0)
#define ADP_USB3_CS_1_CDBW_MASK			GENMASK(23, 12)
#define ADP_USB3_CS_1_CDBW_SHIFT		12
#define ADP_USB3_CS_1_HCA			BIT(31)
#define ADP_USB3_CS_2				0x02
#define ADP_USB3_CS_2_AUBW_MASK			GENMASK(11, 0)
#define ADP_USB3_CS_2_ADBW_MASK			GENMASK(23, 12)
#define ADP_USB3_CS_2_ADBW_SHIFT		12
#define ADP_USB3_CS_2_CMR			BIT(31)
#define ADP_USB3_CS_3				0x03
#define ADP_USB3_CS_3_SCALE_MASK		GENMASK(5, 0)
#define ADP_USB3_CS_4				0x04
#define ADP_USB3_CS_4_ALR_MASK			GENMASK(6, 0)
#define ADP_USB3_CS_4_ALR_20G			0x1
#define ADP_USB3_CS_4_ULV			BIT(7)
#define ADP_USB3_CS_4_MSLR_MASK			GENMASK(18, 12)
#define ADP_USB3_CS_4_MSLR_SHIFT		12
#define ADP_USB3_CS_4_MSLR_20G			0x1
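
As a hedged illustration (not part of the patch), the allocated-bandwidth fields decode with the usual mask-and-shift pattern; the driver additionally applies the scale factor from ADP_USB3_CS_3, which this sketch omits:

	/* Illustrative only: raw allocated bandwidth fields of ADP_USB3_CS_2 */
	static void decode_adp_usb3_cs_2(u32 val, u32 *aubw, u32 *adbw)
	{
		*aubw = val & ADP_USB3_CS_2_AUBW_MASK;
		*adbw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	}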

/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
@ -379,6 +409,7 @@ struct tb_regs_hop {
#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT	4
#define TB_LC_SNK_ALLOCATION_SNK1_MASK	GENMASK(7, 4)
#define TB_LC_SNK_ALLOCATION_SNK1_CM	0x1
#define TB_LC_POWER			0x740

/* Link controller registers */
#define TB_LC_PORT_ATTR			0x8d

drivers/thunderbolt/test.c (new file, 1626 lines)
File diff suppressed because it is too large

drivers/thunderbolt/tunnel.c
@ -124,8 +124,9 @@ static void tb_pci_init_path(struct tb_path *path)
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits =
		tb_initial_credits(path->hops[1].in_port->sw);
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
@ -422,7 +423,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
@ -471,10 +472,15 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (tunnel->max_bw && bw > tunnel->max_bw) {
	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
@ -535,7 +541,8 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
@ -543,7 +550,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 10;
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should be already set
@ -579,10 +586,20 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	return tb_dp_bandwidth(rate, lanes);
	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_path(struct tb_path *path)
@ -708,7 +725,10 @@ err_free:
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
@ -716,7 +736,8 @@ err_free:
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_bw)
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
@ -734,7 +755,8 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_bw = max_bw;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

@ -854,6 +876,33 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
	return tunnel;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;
@ -868,6 +917,86 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	/*
	 * PCIe tunneling affects the USB3 bandwidth so take that into
	 * account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
	*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
	return 0;
}
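
The (3 + 1) / 3 factor scales the USB3 allocation by 4/3 so that a 1:3 share of the link is left for PCIe: with an illustrative allocation of 900 Mb/s, the tunnel reports 900 * 4 / 3 = 1200 Mb/s consumed.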

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret <= 0) {
		tb_tunnel_warn(tunnel, "tunnel is not up\n");
		return;
	}
	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}
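
With illustrative numbers: a 10 Gb/s USB3 link makes usb4_usb3_port_actual_link_rate() return 10000, so max_rate is capped at 9000 Mb/s. If 2000 Mb/s is currently allocated upstream and 3000 Mb/s is available, allocate_up becomes min(9000, 3000) = 3000 Mb/s and *available_up drops to 0 after the reclaim.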

static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
@ -879,8 +1008,9 @@ static void tb_usb3_init_path(struct tb_path *path)
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits =
		tb_initial_credits(path->hops[1].in_port->sw);
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
@ -947,6 +1077,29 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

@ -963,6 +1116,10 @@ err_free:
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
@ -970,10 +1127,32 @@ err_free:
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down)
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
@ -982,6 +1161,8 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
@ -1001,6 +1182,18 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

@ -1133,22 +1326,23 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
}

/**
 * tb_tunnel_switch_on_path() - Does the tunnel go through switch
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @sw: Switch to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @sw (direction does not matter),
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
			      const struct tb_switch *sw)
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		if (tb_path_switch_on_path(tunnel->paths[i], sw))

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

@ -1172,21 +1366,87 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel
 * is not active or does not consume bandwidth.
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}
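
Either output pointer may be %NULL when the caller cares about one direction only; a minimal hypothetical caller:

	/* Illustrative: only the downstream figure is needed */
	static int tunnel_consumed_down(struct tb_tunnel *tunnel, int *down)
	{
		return tb_tunnel_consumed_bandwidth(tunnel, NULL, down);
	}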
|
||||
|
||||
/**
|
||||
* tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
|
||||
* @tunnel: Tunnel whose unused bandwidth to release
|
||||
*
|
||||
* If tunnel supports dynamic bandwidth management (USB3 tunnels at the
|
||||
* moment) this function makes it to release all the unused bandwidth.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno otherwise.
|
||||
*/
|
||||
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
|
||||
{
|
||||
if (!tb_tunnel_is_active(tunnel))
|
||||
return 0;
|
||||
|
||||
if (tunnel->consumed_bandwidth) {
|
||||
int ret = tunnel->consumed_bandwidth(tunnel);
|
||||
if (tunnel->release_unused_bandwidth) {
|
||||
int ret;
|
||||
|
||||
tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
|
||||
return ret;
|
||||
ret = tunnel->release_unused_bandwidth(tunnel);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
|
||||
* @tunnel: Tunnel reclaiming available bandwidth
|
||||
* @available_up: Available upstream bandwidth (in Mb/s)
|
||||
* @available_down: Available downstream bandwidth (in Mb/s)
|
||||
*
|
||||
* Reclaims bandwidth from @available_up and @available_down and updates
|
||||
* the variables accordingly (e.g decreases both according to what was
|
||||
* reclaimed by the tunnel). If nothing was reclaimed the values are
|
||||
* kept as is.
|
||||
*/
|
||||
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
|
||||
int *available_up,
|
||||
int *available_down)
|
||||
{
|
||||
if (!tb_tunnel_is_active(tunnel))
|
||||
return;
|
||||
|
||||
if (tunnel->reclaim_available_bandwidth)
|
||||
tunnel->reclaim_available_bandwidth(tunnel, available_up,
|
||||
available_down);
|
||||
}
|
||||
|
@ -29,10 +29,16 @@ enum tb_tunnel_type {
|
||||
* @init: Optional tunnel specific initialization
|
||||
* @activate: Optional tunnel specific activation/deactivation
|
||||
* @consumed_bandwidth: Return how much bandwidth the tunnel consumes
|
||||
* @release_unused_bandwidth: Release all unused bandwidth
|
||||
* @reclaim_available_bandwidth: Reclaim back available bandwidth
|
||||
* @list: Tunnels are linked using this field
|
||||
* @type: Type of the tunnel
|
||||
* @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP).
|
||||
* @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
|
||||
* Only set if the bandwidth needs to be limited.
|
||||
* @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
|
||||
* Only set if the bandwidth needs to be limited.
|
||||
* @allocated_up: Allocated upstream bandwidth (only for USB3)
|
||||
* @allocated_down: Allocated downstream bandwidth (only for USB3)
|
||||
*/
|
||||
struct tb_tunnel {
|
||||
struct tb *tb;
|
||||
@ -42,10 +48,18 @@ struct tb_tunnel {
|
||||
size_t npaths;
|
||||
int (*init)(struct tb_tunnel *tunnel);
|
||||
int (*activate)(struct tb_tunnel *tunnel, bool activate);
|
||||
int (*consumed_bandwidth)(struct tb_tunnel *tunnel);
|
||||
int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
|
||||
int *consumed_down);
|
||||
int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
|
||||
void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
|
||||
int *available_up,
|
||||
int *available_down);
|
||||
struct list_head list;
|
||||
enum tb_tunnel_type type;
|
||||
unsigned int max_bw;
|
||||
int max_up;
|
||||
int max_down;
|
||||
int allocated_up;
|
||||
int allocated_down;
|
||||
};
|
||||
|
||||
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
|
||||
@ -53,23 +67,30 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
|
||||
struct tb_port *down);
|
||||
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
|
||||
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
|
||||
struct tb_port *out, int max_bw);
|
||||
struct tb_port *out, int max_up,
|
||||
int max_down);
|
||||
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
|
||||
struct tb_port *dst, int transmit_ring,
|
||||
int transmit_path, int receive_ring,
|
||||
int receive_path);
|
||||
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
|
||||
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
|
||||
struct tb_port *down);
|
||||
struct tb_port *down, int max_up,
|
||||
int max_down);
|
||||
|
||||
void tb_tunnel_free(struct tb_tunnel *tunnel);
|
||||
int tb_tunnel_activate(struct tb_tunnel *tunnel);
|
||||
int tb_tunnel_restart(struct tb_tunnel *tunnel);
|
||||
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
|
||||
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
|
||||
bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
|
||||
const struct tb_switch *sw);
|
||||
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel);
|
||||
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
|
||||
const struct tb_port *port);
|
||||
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
|
||||
int *consumed_down);
|
||||
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
|
||||
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
|
||||
int *available_up,
|
||||
int *available_down);
|
||||
|
||||
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
|
||||
{
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/ktime.h>
|
||||
|
||||
#include "sb_regs.h"
|
||||
#include "tb.h"
|
||||
|
||||
#define USB4_DATA_DWORDS 16
|
||||
@ -27,6 +28,12 @@ enum usb4_switch_op {
|
||||
USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
|
||||
};
|
||||
|
||||
enum usb4_sb_target {
|
||||
USB4_SB_TARGET_ROUTER,
|
||||
USB4_SB_TARGET_PARTNER,
|
||||
USB4_SB_TARGET_RETIMER,
|
||||
};
|
||||
|
||||
#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
|
||||
#define USB4_NVM_READ_OFFSET_SHIFT 2
|
||||
#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
|
||||
@ -42,8 +49,8 @@ enum usb4_switch_op {
|
||||
|
||||
#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
|
||||
|
||||
typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t);
|
||||
typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t);
|
||||
typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
|
||||
typedef int (*write_block_fn)(void *, const void *, size_t);
|
||||
|
||||
static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
|
||||
u32 value, int timeout_msec)
|
||||
@ -95,8 +102,8 @@ static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
|
||||
return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
|
||||
}
|
||||
|
||||
static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
|
||||
void *buf, size_t size, read_block_fn read_block)
|
||||
static int usb4_do_read_data(u16 address, void *buf, size_t size,
|
||||
read_block_fn read_block, void *read_block_data)
|
||||
{
|
||||
unsigned int retries = USB4_DATA_RETRIES;
|
||||
unsigned int offset;
|
||||
@ -113,13 +120,10 @@ static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
|
||||
dwaddress = address / 4;
|
||||
dwords = ALIGN(nbytes, 4) / 4;
|
||||
|
||||
ret = read_block(sw, dwaddress, data, dwords);
|
||||
ret = read_block(read_block_data, dwaddress, data, dwords);
|
||||
if (ret) {
|
||||
if (ret == -ETIMEDOUT) {
|
||||
if (retries--)
|
||||
continue;
|
||||
ret = -EIO;
|
||||
}
|
||||
if (ret != -ENODEV && retries--)
|
||||
continue;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -133,8 +137,8 @@ static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address,
|
||||
const void *buf, size_t size, write_block_fn write_next_block)
|
||||
static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
|
||||
write_block_fn write_next_block, void *write_block_data)
|
||||
{
|
||||
unsigned int retries = USB4_DATA_RETRIES;
|
||||
unsigned int offset;
|
||||
@ -149,7 +153,7 @@ static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address,
|
||||
|
||||
memcpy(data + offset, buf, nbytes);
|
||||
|
||||
ret = write_next_block(sw, data, nbytes / 4);
|
||||
ret = write_next_block(write_block_data, data, nbytes / 4);
|
||||
if (ret) {
|
||||
if (ret == -ETIMEDOUT) {
|
||||
if (retries--)
|
||||
@ -192,6 +196,20 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool link_is_usb4(struct tb_port *port)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
if (!port->cap_usb4)
|
||||
return false;
|
||||
|
||||
if (tb_port_read(port, &val, TB_CFG_PORT,
|
||||
port->cap_usb4 + PORT_CS_18, 1))
|
||||
return false;
|
||||
|
||||
return !(val & PORT_CS_18_TCM);
|
||||
}
|
||||
|
||||
/**
|
||||
* usb4_switch_setup() - Additional setup for USB4 device
|
||||
* @sw: USB4 router to setup
|
||||
@ -205,6 +223,7 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
|
||||
*/
|
||||
int usb4_switch_setup(struct tb_switch *sw)
|
||||
{
|
||||
struct tb_port *downstream_port;
|
||||
struct tb_switch *parent;
|
||||
bool tbt3, xhci;
|
||||
u32 val = 0;
|
||||
@ -217,6 +236,11 @@ int usb4_switch_setup(struct tb_switch *sw)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
parent = tb_switch_parent(sw);
|
||||
downstream_port = tb_port_at(tb_route(sw), parent);
|
||||
sw->link_usb4 = link_is_usb4(downstream_port);
|
||||
tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");
|
||||
|
||||
xhci = val & ROUTER_CS_6_HCI;
|
||||
tbt3 = !(val & ROUTER_CS_6_TNS);
|
||||
|
||||
@ -227,9 +251,7 @@ int usb4_switch_setup(struct tb_switch *sw)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
parent = tb_switch_parent(sw);
|
||||
|
||||
if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
|
||||
if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
|
||||
val |= ROUTER_CS_5_UTO;
|
||||
xhci = false;
|
||||
}
|
||||
@ -271,10 +293,11 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
|
||||
return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
|
||||
}
|
||||
|
||||
static int usb4_switch_drom_read_block(struct tb_switch *sw,
|
||||
static int usb4_switch_drom_read_block(void *data,
|
||||
unsigned int dwaddress, void *buf,
|
||||
size_t dwords)
|
||||
{
|
||||
struct tb_switch *sw = data;
|
||||
u8 status = 0;
|
||||
u32 metadata;
|
||||
int ret;
|
||||
@ -311,8 +334,8 @@ static int usb4_switch_drom_read_block(struct tb_switch *sw,
|
||||
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
|
||||
size_t size)
|
||||
{
|
||||
return usb4_switch_do_read_data(sw, address, buf, size,
|
||||
usb4_switch_drom_read_block);
|
||||
return usb4_do_read_data(address, buf, size,
|
||||
usb4_switch_drom_read_block, sw);
|
||||
}
|
||||
|
||||
static int usb4_set_port_configured(struct tb_port *port, bool configured)
|
||||
@ -445,9 +468,10 @@ int usb4_switch_nvm_sector_size(struct tb_switch *sw)
|
||||
return metadata & USB4_NVM_SECTOR_SIZE_MASK;
|
||||
}
|
||||
|
||||
static int usb4_switch_nvm_read_block(struct tb_switch *sw,
|
||||
static int usb4_switch_nvm_read_block(void *data,
|
||||
unsigned int dwaddress, void *buf, size_t dwords)
|
||||
{
|
||||
struct tb_switch *sw = data;
|
||||
u8 status = 0;
|
||||
u32 metadata;
|
||||
int ret;
|
||||
@ -484,8 +508,8 @@ static int usb4_switch_nvm_read_block(struct tb_switch *sw,
|
||||
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
|
||||
size_t size)
|
||||
{
|
||||
return usb4_switch_do_read_data(sw, address, buf, size,
|
||||
usb4_switch_nvm_read_block);
|
||||
return usb4_do_read_data(address, buf, size,
|
||||
usb4_switch_nvm_read_block, sw);
|
||||
}
|
||||
|
||||
static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
|
||||
@ -510,9 +534,10 @@ static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
|
||||
return status ? -EIO : 0;
|
||||
}
|
||||
|
||||
static int usb4_switch_nvm_write_next_block(struct tb_switch *sw,
|
||||
const void *buf, size_t dwords)
|
||||
static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
|
||||
size_t dwords)
|
||||
{
|
||||
struct tb_switch *sw = data;
|
||||
u8 status;
|
||||
int ret;
|
||||
|
||||
@ -546,8 +571,8 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return usb4_switch_do_write_data(sw, address, buf, size,
|
||||
usb4_switch_nvm_write_next_block);
|
||||
return usb4_do_write_data(address, buf, size,
|
||||
usb4_switch_nvm_write_next_block, sw);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -710,7 +735,7 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
|
||||
if (!tb_port_is_pcie_down(p))
|
||||
continue;
|
||||
|
||||
if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p))
|
||||
if (pcie_idx == usb4_idx)
|
||||
return p;
|
||||
|
||||
pcie_idx++;
|
||||
@ -741,7 +766,7 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
|
||||
if (!tb_port_is_usb3_down(p))
|
||||
continue;
|
||||
|
||||
if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p))
|
||||
if (usb_idx == usb4_idx)
|
||||
return p;
|
||||
|
||||
usb_idx++;
|
||||
@ -769,3 +794,796 @@ int usb4_port_unlock(struct tb_port *port)
|
||||
val &= ~ADP_CS_4_LCK;
|
||||
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
|
||||
}
|
||||
|
||||
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
|
||||
u32 value, int timeout_msec)
|
||||
{
|
||||
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
|
||||
|
||||
do {
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if ((val & bit) == value)
|
||||
return 0;
|
||||
|
||||
usleep_range(50, 100);
|
||||
} while (ktime_before(ktime_get(), timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
|
||||
{
|
||||
if (dwords > USB4_DATA_DWORDS)
|
||||
return -EINVAL;
|
||||
|
||||
return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
|
||||
dwords);
|
||||
}
|
||||
|
||||
static int usb4_port_write_data(struct tb_port *port, const void *data,
|
||||
size_t dwords)
|
||||
{
|
||||
if (dwords > USB4_DATA_DWORDS)
|
||||
return -EINVAL;
|
||||
|
||||
return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
|
||||
dwords);
|
||||
}
|
||||
|
||||
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, u8 reg, void *buf, u8 size)
|
||||
{
|
||||
size_t dwords = DIV_ROUND_UP(size, 4);
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
if (!port->cap_usb4)
|
||||
return -EINVAL;
|
||||
|
||||
val = reg;
|
||||
val |= size << PORT_CS_1_LENGTH_SHIFT;
|
||||
val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
|
||||
if (target == USB4_SB_TARGET_RETIMER)
|
||||
val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
|
||||
val |= PORT_CS_1_PND;
|
||||
|
||||
ret = tb_port_write(port, &val, TB_CFG_PORT,
|
||||
port->cap_usb4 + PORT_CS_1, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
|
||||
PORT_CS_1_PND, 0, 500);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tb_port_read(port, &val, TB_CFG_PORT,
|
||||
port->cap_usb4 + PORT_CS_1, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (val & PORT_CS_1_NR)
|
||||
return -ENODEV;
|
||||
if (val & PORT_CS_1_RC)
|
||||
return -EIO;
|
||||
|
||||
return buf ? usb4_port_read_data(port, buf, dwords) : 0;
|
||||
}
|
||||
|
||||
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, u8 reg, const void *buf, u8 size)
|
||||
{
|
||||
size_t dwords = DIV_ROUND_UP(size, 4);
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
if (!port->cap_usb4)
|
||||
return -EINVAL;
|
||||
|
||||
if (buf) {
|
||||
ret = usb4_port_write_data(port, buf, dwords);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
val = reg;
|
||||
val |= size << PORT_CS_1_LENGTH_SHIFT;
|
||||
val |= PORT_CS_1_WNR_WRITE;
|
||||
val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
|
||||
if (target == USB4_SB_TARGET_RETIMER)
|
||||
val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
|
||||
val |= PORT_CS_1_PND;
|
||||
|
||||
ret = tb_port_write(port, &val, TB_CFG_PORT,
|
||||
port->cap_usb4 + PORT_CS_1, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
|
||||
PORT_CS_1_PND, 0, 500);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tb_port_read(port, &val, TB_CFG_PORT,
|
||||
port->cap_usb4 + PORT_CS_1, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (val & PORT_CS_1_NR)
|
||||
return -ENODEV;
|
||||
if (val & PORT_CS_1_RC)
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
|
||||
{
|
||||
ktime_t timeout;
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
val = opcode;
|
||||
ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
|
||||
sizeof(val));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
timeout = ktime_add_ms(ktime_get(), timeout_msec);
|
||||
|
||||
do {
|
||||
/* Check results */
|
||||
ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
|
||||
&val, sizeof(val));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
switch (val) {
|
||||
case 0:
|
||||
return 0;
|
||||
|
||||
case USB4_SB_OPCODE_ERR:
|
||||
return -EAGAIN;
|
||||
|
||||
case USB4_SB_OPCODE_ONS:
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
default:
|
||||
if (val != opcode)
|
||||
return -EIO;
|
||||
break;
|
||||
}
|
||||
} while (ktime_before(ktime_get(), timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/**
|
||||
* usb4_port_enumerate_retimers() - Send RT broadcast transaction
|
||||
* @port: USB4 port
|
||||
*
|
||||
* This forces the USB4 port to send broadcast RT transaction which
|
||||
* makes the retimers on the link to assign index to themselves. Returns
|
||||
* %0 in case of success and negative errno if there was an error.
|
||||
*/
|
||||
int usb4_port_enumerate_retimers(struct tb_port *port)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
|
||||
return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
|
||||
USB4_SB_OPCODE, &val, sizeof(val));
|
||||
}
|
||||
|
||||
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
|
||||
enum usb4_sb_opcode opcode,
|
||||
int timeout_msec)
|
||||
{
|
||||
return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
|
||||
timeout_msec);
|
||||
}
|
||||
|
||||
/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success and the
 * read data is copied to @buf. If there is no retimer present at the
 * given @index, returns %-ENODEV. In any other failure returns negative
 * errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}

/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at the given @index, returns %-ENODEV. In any
 * other failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}

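/*
 * Editor's note: a usage sketch, not part of the driver. After
 * enumeration, retimer indices start from 1 and a probe can walk them
 * until a sideband read fails with -ENODEV. The upper bound of 6 and
 * the use of USB4_SB_VENDOR_ID as the probed register are assumptions
 * made for illustration.
 */
static int example_count_retimers(struct tb_port *port)
{
	u32 vendor;
	int i, ret;

	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	for (i = 1; i <= 6; i++) {
		ret = usb4_port_retimer_read(port, i, USB4_SB_VENDOR_ID,
					     &vendor, sizeof(vendor));
		if (ret == -ENODEV)
			break;		/* no retimer at this index */
		if (ret)
			return ret;
	}

	return i - 1;	/* number of retimers found on the link */
}
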
/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is the last one (connected directly to the
 * Type-C port) this function returns %1. If it is not, returns %0. If
 * the retimer is not present, returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used, for example, to determine whether the retimer
 * supports NVM upgrade. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

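/*
 * Editor's note: both query operations above share the same shape: run a
 * sideband opcode, then read the result out of USB4_SB_METADATA. A quick
 * capability check might look like this sketch (the name is illustrative,
 * not part of the driver):
 */
static bool example_retimer_supports_nvm_upgrade(struct tb_port *port, u8 index)
{
	int ret = usb4_port_retimer_nvm_sector_size(port, index);

	/* A positive sector size implies the NVM can be upgraded */
	return ret > 0;
}
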
static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
					    unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}

struct retimer_info {
	struct tb_port *port;
	u8 index;
};

static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
						  size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Number of bytes to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return usb4_do_write_data(address, buf, size,
				  usb4_port_retimer_nvm_write_next_block, &info);
}

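/*
 * Editor's note: a sketch of pushing a complete NVM image, assuming the
 * image has already been validated and sits in a contiguous buffer.
 * Writing from offset 0 in a single call is an illustrative
 * simplification; the chunking into USB4_SB_DATA sized blocks happens
 * inside usb4_do_write_data() via the block callback above.
 */
static int example_flash_retimer_image(struct tb_port *port, u8 index,
				       const void *image, size_t image_size)
{
	return usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
}
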
/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set, so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	switch (val) {
	case 0:
		*status = 0;
		return 0;

	case USB4_SB_OPCODE_ERR:
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;

	default:
		return -EIO;
	}
}

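/*
 * Editor's note: the full upgrade sequence implied by the kernel-doc
 * above, as one possible sketch. Because authentication drops the
 * retimer index, the retimers must be re-enumerated before the status
 * can be read. Treating a non-zero status as -EINVAL is an assumption
 * made for this example.
 */
static int example_retimer_upgrade(struct tb_port *port, u8 index,
				   const void *image, size_t image_size)
{
	u32 status;
	int ret;

	ret = usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate(port, index);
	if (ret)
		return ret;

	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate_status(port, index, &status);
	if (ret)
		return ret;

	return status ? -EINVAL : 0;
}
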
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}

/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return usb4_do_read_data(address, buf, size,
				 usb4_port_retimer_nvm_read_block, &info);
}

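/*
 * Editor's note: a read-back verification sketch built on the helper
 * above. Comparing the NVM contents against the source buffer is an
 * assumed policy for this example, not something the driver mandates,
 * and -EIO on mismatch is likewise an arbitrary choice.
 */
static int example_verify_retimer_image(struct tb_port *port, u8 index,
					const void *image, size_t image_size)
{
	void *readback;
	int ret;

	readback = kmalloc(image_size, GFP_KERNEL);
	if (!readback)
		return -ENOMEM;

	ret = usb4_port_retimer_nvm_read(port, index, 0, readback, image_size);
	if (!ret && memcmp(image, readback, image_size))
		ret = -EIO;

	kfree(readback);
	return ret;
}
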
/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Returns the maximum supported link rate of a USB3 adapter in Mb/s, or
 * negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
}

/**
 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
 * @port: USB3 adapter port
 *
 * Returns the actual established link rate of a USB3 adapter in Mb/s.
 * If the link is not up, returns %0. Returns negative errno in case of
 * failure.
 */
int usb4_usb3_port_actual_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	if (!(val & ADP_USB3_CS_4_ULV))
		return 0;

	lr = val & ADP_USB3_CS_4_ALR_MASK;
	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
}

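/*
 * Editor's note: a small sketch combining the two queries above to
 * distinguish "link down" from a 10000 or 20000 Mb/s link. The
 * tb_port_dbg() logging helper exists in the driver; the decision logic
 * and the function name here are illustrative.
 */
static void example_log_usb3_link(struct tb_port *port)
{
	int max = usb4_usb3_port_max_link_rate(port);
	int actual = usb4_usb3_port_actual_link_rate(port);

	if (max < 0 || actual < 0)
		return;

	if (!actual)
		tb_port_dbg(port, "USB3 link is not up (max %d Mb/s)\n", max);
	else
		tb_port_dbg(port, "USB3 link up at %d of %d Mb/s\n",
			    actual, max);
}
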
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}

static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
	unsigned long uframes;

	uframes = bw * 512UL << scale;
	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
}

static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
	unsigned long uframes;

	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
	uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
	return DIV_ROUND_UP(uframes, 512UL << scale);
}

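/*
 * Editor's note: a worked example of the conversion pair above at
 * scale 0, where one bandwidth unit corresponds to 512 bits per
 * microframe, i.e. 512 * 8000 bits/s ~= 4 Mb/s:
 *
 *	mbps_to_usb3_bw(900, 0)
 *		-> 900000000 / 8000 = 112500 bits per uframe
 *		-> DIV_ROUND_UP(112500, 512) = 220 units
 *
 *	usb3_bw_to_mbps(220, 0)
 *		-> 220 * 512 * 8000 = 901120000 bits/s
 *		-> DIV_ROUND_CLOSEST(901120000, 1000000) = 901 Mb/s
 *
 * Rounding up toward the registers and to-closest back to Mb/s means a
 * requested value never converts back smaller than what was asked for.
 */
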
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in case of failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}

static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret;

	/* Read the used scale, hardware default is 0 */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;
	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}

/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed, the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by the CM). The actual new values are returned
 * in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it to go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

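/*
 * Editor's note: a sketch of how a connection manager might call the
 * allocation routine, assuming it wants 900 Mb/s in each direction. The
 * function may grant more than requested (never less than what is
 * already consumed), so the caller reads the granted values back from
 * the same variables. The function name is illustrative.
 */
static int example_allocate_usb3_bandwidth(struct tb_port *port)
{
	int up = 900, down = 900;
	int ret;

	ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
	if (ret)
		return ret;

	tb_port_dbg(port, "granted %d/%d Mb/s for USB3\n", up, down);
	return 0;
}
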
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 1000)
		consumed_up = 1000;
	if (consumed_down < 1000)
		consumed_down = 1000;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

@@ -501,6 +501,55 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

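/*
 * Editor's note: rebuild_property_block() uses the common two-pass
 * sizing idiom: tb_property_format_dir() called with a NULL buffer
 * returns the required length in dwords, and a second call fills the
 * allocated block. A minimal sketch of the same pattern against an
 * arbitrary property directory (the function name is illustrative):
 */
static u32 *example_format_dir(struct tb_property_dir *dir, size_t *len_out)
{
	u32 *block;
	int len;

	len = tb_property_format_dir(dir, NULL, 0);	/* pass 1: size */
	if (len < 0)
		return NULL;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return NULL;

	if (tb_property_format_dir(dir, block, len)) {	/* pass 2: fill */
		kfree(block);
		return NULL;
	}

	*len_out = len;
	return block;
}
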
static void finalize_property_block(void)
{
	const struct tb_property *nodename;

	/*
	 * On first XDomain connection we set up the system nodename.
	 * This is delayed here because userspace may not have it set
	 * when the driver is first probed.
	 */
	mutex_lock(&xdomain_lock);
	nodename = tb_property_find(xdomain_property_dir, "deviceid",
				    TB_PROPERTY_TYPE_TEXT);
	if (!nodename) {
		tb_property_add_text(xdomain_property_dir, "deviceid",
				     utsname()->nodename);
		rebuild_property_block();
	}
	mutex_unlock(&xdomain_lock);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
@@ -529,6 +578,8 @@ static void tb_xdp_handle_request(struct work_struct *work)
		goto out;
	}

	finalize_property_block();

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
@@ -1569,35 +1620,6 @@ bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
	return ret > 0;
}

static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
@@ -1702,8 +1724,6 @@ EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
	int ret;

	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;
@@ -1712,22 +1732,16 @@ int tb_xdomain_init(void)
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 *
	 * We also add node name later when first connection is made.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_text(xdomain_property_dir, "deviceid",
			     utsname()->nodename);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	ret = rebuild_property_block();
	if (ret) {
		tb_property_free_dir(xdomain_property_dir);
		xdomain_property_dir = NULL;
	}

	return ret;
	return 0;
}

void tb_xdomain_exit(void)

@@ -504,8 +504,6 @@ struct tb_ring {
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);