thunderbolt: Changes for v6.8 merge window
Merge tag 'thunderbolt-for-v6.8-rc1' of
git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into
char-misc-next

Mika writes:

  This includes the following USB4/Thunderbolt changes for the v6.8
  merge window:

  - Intel Lunar Lake support
  - PCIe tunneling improvements
  - DisplayPort tunneling improvements
  - Asymmetric switching improvements
  - A couple of minor fixes and cleanups

  All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  thunderbolt: Reduce retry timeout to speed up boot for some devices
  thunderbolt: Keep link as asymmetric if preferred by hardware
  thunderbolt: Add support for Intel Lunar Lake
  thunderbolt: Disable PCIe extended encapsulation upon teardown properly
  thunderbolt: Make PCIe tunnel setup and teardown follow CM guide
  thunderbolt: Improve logging when DisplayPort resource is added due to hotplug
  thunderbolt: Use tb_dp_read_cap() to read DP_COMMON_CAP as well
  thunderbolt: Disable CL states only when actually needed
  thunderbolt: Transition link to asymmetric only when both sides support it
  thunderbolt: Log XDomain link speed and width
  thunderbolt: Move width_name() helper to tb.h
  thunderbolt: Handle lane bonding of Gen 4 XDomain links properly
  thunderbolt: Unwind TMU configuration if tb_switch_set_tmu_mode_params() fails
  thunderbolt: Remove duplicated re-assignment of pointer 'out'
Merge commit: 21bea26c84
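The asymmetric switching work in this series comes down to one decision rule, spread across the tb_configure_asym() hunks further down: once consumed plus newly requested bandwidth in the dominant direction crosses asym_threshold, the link is moved to the matching asymmetric width on both lane adapters, and only if both of them support that width. The sketch below is an illustrative condensation of that rule, not driver code: pick_asym_widths(), example_link_width, and the WIDTH_* names are invented here, and the TB_ASYM_MIN check on the opposite direction is omitted for brevity.

/*
 * Illustrative sketch only. Mirrors the width selection visible in the
 * tb_configure_asym() hunks below; all names here are stand-ins.
 */
enum example_link_width { WIDTH_DUAL, WIDTH_ASYM_TX, WIDTH_ASYM_RX };

static int pick_asym_widths(int consumed_up, int consumed_down,
                            int requested_up, int requested_down,
                            int asym_threshold, int downstream,
                            enum example_link_width *width_up,
                            enum example_link_width *width_down)
{
        if (downstream) {
                /* Downstream-heavy: upstream-facing port gets 3 receivers */
                if (consumed_down + requested_down < asym_threshold)
                        return 0;               /* stay symmetric */
                *width_up = WIDTH_ASYM_RX;
                *width_down = WIDTH_ASYM_TX;
        } else {
                /* Upstream-heavy: the opposite of above */
                if (consumed_up + requested_up < asym_threshold)
                        return 0;
                *width_up = WIDTH_ASYM_TX;
                *width_down = WIDTH_ASYM_RX;
        }
        return 1;       /* transition, provided both ports support the width */
}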
@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
         memset(&reply, 0, sizeof(reply));

         ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                          1, 10, 2000);
+                          1, 10, 250);
         if (ret)
                 return ret;

@@ -1517,6 +1517,10 @@ static struct pci_device_id nhi_ids[] = {
           .driver_data = (kernel_ulong_t)&icl_nhi_ops },
         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
           .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0),
+          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
+          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },

@@ -90,6 +90,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
 #define PCI_DEVICE_ID_INTEL_TGL_H_NHI1          0x9a21
 #define PCI_DEVICE_ID_INTEL_RPL_NHI0            0xa73e
 #define PCI_DEVICE_ID_INTEL_RPL_NHI1            0xa76d
+#define PCI_DEVICE_ID_INTEL_LNL_NHI0            0xa833
+#define PCI_DEVICE_ID_INTEL_LNL_NHI1            0xa834

 #define PCI_CLASS_SERIAL_USB_USB4               0x0c0340

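The two hunks above wire up Lunar Lake by adding its NHI PCI device IDs and pointing their .driver_data at the existing icl_nhi_ops. As a reminder of how such table entries are consumed, here is a minimal probe-side sketch; example_probe() is hypothetical, and only the ID macro and icl_nhi_ops names come from the diff.

/*
 * Hypothetical sketch (not part of this series): recovering the ops
 * pointer that an nhi_ids[] entry stores in .driver_data. Assumes
 * <linux/pci.h> and the driver's nhi.h for struct tb_nhi_ops.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        const struct tb_nhi_ops *ops;

        /* Matched e.g. PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0) */
        ops = (const struct tb_nhi_ops *)id->driver_data;
        if (!ops)
                return -ENODEV;

        dev_info(&pdev->dev, "using ICL-style NHI ops for this host controller\n");
        return 0;
}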
@@ -941,22 +941,6 @@ int tb_port_get_link_generation(struct tb_port *port)
         }
 }

-static const char *width_name(enum tb_link_width width)
-{
-        switch (width) {
-        case TB_LINK_WIDTH_SINGLE:
-                return "symmetric, single lane";
-        case TB_LINK_WIDTH_DUAL:
-                return "symmetric, dual lanes";
-        case TB_LINK_WIDTH_ASYM_TX:
-                return "asymmetric, 3 transmitters, 1 receiver";
-        case TB_LINK_WIDTH_ASYM_RX:
-                return "asymmetric, 3 receivers, 1 transmitter";
-        default:
-                return "unknown";
-        }
-}
-
 /**
  * tb_port_get_link_width() - Get current link width
  * @port: Port to check (USB4 or CIO)

@@ -2769,7 +2753,7 @@ static void tb_switch_link_init(struct tb_switch *sw)
                 return;

         tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
-        tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
+        tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width));

         bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;

@@ -2789,6 +2773,19 @@ static void tb_switch_link_init(struct tb_switch *sw)
         if (down->dual_link_port)
                 down->dual_link_port->bonded = bonded;
         tb_port_update_credits(down);
+
+        if (tb_port_get_link_generation(up) < 4)
+                return;
+
+        /*
+         * Set the Gen 4 preferred link width. This is what the router
+         * prefers when the link is brought up. If the router does not
+         * support asymmetric link configuration, this also will be set
+         * to TB_LINK_WIDTH_DUAL.
+         */
+        sw->preferred_link_width = sw->link_width;
+        tb_sw_dbg(sw, "preferred link width %s\n",
+                  tb_width_name(sw->preferred_link_width));
 }

 /**

@@ -3029,7 +3026,7 @@ int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)

         tb_switch_update_link_attributes(sw);

-        tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
+        tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width));
         return ret;
 }

@@ -513,8 +513,6 @@ static void tb_port_unconfigure_xdomain(struct tb_port *port)
                 usb4_port_unconfigure_xdomain(port);
         else
                 tb_lc_unconfigure_xdomain(port);
-
-        tb_port_enable(port->dual_link_port);
 }

 static void tb_scan_xdomain(struct tb_port *port)

@@ -1087,15 +1085,14 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
                              struct tb_port *dst_port, int requested_up,
                              int requested_down)
 {
+        bool clx = false, clx_disabled = false, downstream;
         struct tb_switch *sw;
-        bool clx, downstream;
         struct tb_port *up;
         int ret = 0;

         if (!asym_threshold)
                 return 0;

-        /* Disable CL states before doing any transitions */
         downstream = tb_port_path_direction_downstream(src_port, dst_port);
         /* Pick up router deepest in the hierarchy */
         if (downstream)

@@ -1103,11 +1100,10 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
         else
                 sw = src_port->sw;

-        clx = tb_disable_clx(sw);
-
         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+                struct tb_port *down = tb_switch_downstream_port(up->sw);
+                enum tb_link_width width_up, width_down;
                 int consumed_up, consumed_down;
-                enum tb_link_width width;

                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
                                                &consumed_up, &consumed_down);

@@ -1128,7 +1124,8 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
                         if (consumed_down + requested_down < asym_threshold)
                                 continue;

-                        width = TB_LINK_WIDTH_ASYM_RX;
+                        width_up = TB_LINK_WIDTH_ASYM_RX;
+                        width_down = TB_LINK_WIDTH_ASYM_TX;
                 } else {
                         /* Upstream, the opposite of above */
                         if (consumed_down + requested_down >= TB_ASYM_MIN) {

@@ -1138,22 +1135,34 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
                         if (consumed_up + requested_up < asym_threshold)
                                 continue;

-                        width = TB_LINK_WIDTH_ASYM_TX;
+                        width_up = TB_LINK_WIDTH_ASYM_TX;
+                        width_down = TB_LINK_WIDTH_ASYM_RX;
                 }

-                if (up->sw->link_width == width)
+                if (up->sw->link_width == width_up)
                         continue;

-                if (!tb_port_width_supported(up, width))
+                if (!tb_port_width_supported(up, width_up) ||
+                    !tb_port_width_supported(down, width_down))
                         continue;

+                /*
+                 * Disable CL states before doing any transitions. We
+                 * delayed it until now that we know there is a real
+                 * transition taking place.
+                 */
+                if (!clx_disabled) {
+                        clx = tb_disable_clx(sw);
+                        clx_disabled = true;
+                }
+
                 tb_sw_dbg(up->sw, "configuring asymmetric link\n");

                 /*
                  * Here requested + consumed > threshold so we need to
                  * transtion the link into asymmetric now.
                  */
-                ret = tb_switch_set_link_width(up->sw, width);
+                ret = tb_switch_set_link_width(up->sw, width_up);
                 if (ret) {
                         tb_sw_warn(up->sw, "failed to set link width\n");
                         break;

@@ -1174,24 +1183,24 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
  * @dst_port: Destination adapter
  * @requested_up: New lower bandwidth request upstream (Mb/s)
  * @requested_down: New lower bandwidth request downstream (Mb/s)
+ * @keep_asym: Keep asymmetric link if preferred
  *
  * Goes over each link from @src_port to @dst_port and tries to
  * transition the link to symmetric if the currently consumed bandwidth
- * allows.
+ * allows and link asymmetric preference is ignored (if @keep_asym is %false).
  */
 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
                             struct tb_port *dst_port, int requested_up,
-                            int requested_down)
+                            int requested_down, bool keep_asym)
 {
+        bool clx = false, clx_disabled = false, downstream;
         struct tb_switch *sw;
-        bool clx, downstream;
         struct tb_port *up;
         int ret = 0;

         if (!asym_threshold)
                 return 0;

-        /* Disable CL states before doing any transitions */
         downstream = tb_port_path_direction_downstream(src_port, dst_port);
         /* Pick up router deepest in the hierarchy */
         if (downstream)

@@ -1199,8 +1208,6 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
         else
                 sw = src_port->sw;

-        clx = tb_disable_clx(sw);
-
         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
                 int consumed_up, consumed_down;

@@ -1233,6 +1240,25 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
                 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
                         continue;

+                /*
+                 * Here consumed < threshold so we can transition the
+                 * link to symmetric.
+                 *
+                 * However, if the router prefers asymmetric link we
+                 * honor that (unless @keep_asym is %false).
+                 */
+                if (keep_asym &&
+                    up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
+                        tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
+                        continue;
+                }
+
+                /* Disable CL states before doing any transitions */
+                if (!clx_disabled) {
+                        clx = tb_disable_clx(sw);
+                        clx_disabled = true;
+                }
+
                 tb_sw_dbg(up->sw, "configuring symmetric link\n");

                 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);

@@ -1280,7 +1306,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
                 struct tb_port *host_port;

                 host_port = tb_port_at(tb_route(sw), tb->root_switch);
-                tb_configure_sym(tb, host_port, up, 0, 0);
+                tb_configure_sym(tb, host_port, up, 0, 0, false);
         }

         /* Set the link configured */

@@ -1465,7 +1491,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
                  * If bandwidth on a link is < asym_threshold
                  * transition the link to symmetric.
                  */
-                tb_configure_sym(tb, src_port, dst_port, 0, 0);
+                tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
                 /* Now we can allow the domain to runtime suspend again */
                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
                 pm_runtime_put_autosuspend(&dst_port->sw->dev);

@@ -1901,7 +1927,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
                 return;
         }

-        tb_port_dbg(port, "DP %s resource available\n",
+        tb_port_dbg(port, "DP %s resource available after hotplug\n",
                     tb_port_is_dpin(port) ? "IN" : "OUT");
         list_add_tail(&port->list, &tcm->dp_resources);

@@ -2287,7 +2313,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
                  * If bandwidth on a link is < asym_threshold transition
                  * the link to symmetric.
                  */
-                tb_configure_sym(tb, in, out, *requested_up, *requested_down);
+                tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
                 /*
                  * If requested bandwidth is less or equal than what is
                  * currently allocated to that tunnel we simply change

@@ -2330,7 +2356,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
                 ret = tb_configure_asym(tb, in, out, *requested_up,
                                         *requested_down);
                 if (ret) {
-                        tb_configure_sym(tb, in, out, 0, 0);
+                        tb_configure_sym(tb, in, out, 0, 0, true);
                         return ret;
                 }

@@ -2338,7 +2364,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
                                            requested_down);
                 if (ret) {
                         tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
-                        tb_configure_sym(tb, in, out, 0, 0);
+                        tb_configure_sym(tb, in, out, 0, 0, true);
                 }
         } else {
                 ret = -ENOBUFS;

@@ -125,6 +125,7 @@ struct tb_switch_tmu {
  * @device_name: Name of the device (or %NULL if not known)
  * @link_speed: Speed of the link in Gb/s
  * @link_width: Width of the upstream facing link
+ * @preferred_link_width: Router preferred link width (only set for Gen 4 links)
  * @link_usb4: Upstream link is USB4
  * @generation: Switch Thunderbolt generation
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)

@@ -178,6 +179,7 @@ struct tb_switch {
         const char *device_name;
         unsigned int link_speed;
         enum tb_link_width link_width;
+        enum tb_link_width preferred_link_width;
         bool link_usb4;
         unsigned int generation;
         int cap_plug_events;

@@ -568,6 +570,22 @@ static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
         return &sw->ports[port];
 }

+static inline const char *tb_width_name(enum tb_link_width width)
+{
+        switch (width) {
+        case TB_LINK_WIDTH_SINGLE:
+                return "symmetric, single lane";
+        case TB_LINK_WIDTH_DUAL:
+                return "symmetric, dual lanes";
+        case TB_LINK_WIDTH_ASYM_TX:
+                return "asymmetric, 3 transmitters, 1 receiver";
+        case TB_LINK_WIDTH_ASYM_RX:
+                return "asymmetric, 3 receivers, 1 transmitter";
+        default:
+                return "unknown";
+        }
+}
+
 /**
  * tb_port_has_remote() - Does the port have switch connected downstream
  * @port: Port to check

@@ -894,7 +894,7 @@ static int tb_switch_tmu_change_mode(struct tb_switch *sw)

         ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
         if (ret)
-                return ret;
+                goto out;

         /* Program the new mode and the downstream router lane adapter */
         switch (sw->tmu.mode_request) {

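The TMU hunk above turns an early return into a goto out, so that a failure from tb_switch_set_tmu_mode_params() goes through the function's unwind path instead of leaving the TMU half-configured. The out: label itself is outside the hunk; the sketch below only shows the general shape of that pattern, and every example_* name is invented.

/*
 * Schematic only -- not the real tb_switch_tmu_change_mode(). All
 * example_* names are hypothetical; the point is the goto-out unwind.
 */
struct example_sw { int mode, mode_request; };

static int example_set_mode_params(struct example_sw *sw, int mode) { return 0; }
static int example_program_mode(struct example_sw *sw) { return 0; }
static void example_undo_mode_change(struct example_sw *sw) { sw->mode_request = sw->mode; }

static int example_change_mode(struct example_sw *sw)
{
        int ret;

        ret = example_set_mode_params(sw, sw->mode_request);
        if (ret)
                goto out;       /* previously an early return; now unwinds */

        ret = example_program_mode(sw);
        if (ret)
                goto out;

        return 0;

out:
        example_undo_mode_change(sw);   /* restore the previous TMU setup */
        return ret;
}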
@@ -173,16 +173,28 @@ static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
         int ret;

         /* Only supported of both routers are at least USB4 v2 */
-        if (tb_port_get_link_generation(port) < 4)
+        if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
+            (usb4_switch_version(tunnel->dst_port->sw) < 2))
+                return 0;
+
+        if (enable && tb_port_get_link_generation(port) < 4)
                 return 0;

         ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
         if (ret)
                 return ret;

+        /*
+         * Downstream router could be unplugged so disable of encapsulation
+         * in upstream router is still possible.
+         */
         ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
-        if (ret)
-                return ret;
+        if (ret) {
+                if (enable)
+                        return ret;
+                if (ret != -ENODEV)
+                        return ret;
+        }

         tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
                       str_enabled_disabled(enable));

@@ -199,14 +211,21 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
                         return res;
         }

-        res = tb_pci_port_enable(tunnel->src_port, activate);
+        if (activate)
+                res = tb_pci_port_enable(tunnel->dst_port, activate);
+        else
+                res = tb_pci_port_enable(tunnel->src_port, activate);
         if (res)
                 return res;

-        if (tb_port_is_pcie_up(tunnel->dst_port)) {
-                res = tb_pci_port_enable(tunnel->dst_port, activate);
+        if (activate) {
+                res = tb_pci_port_enable(tunnel->src_port, activate);
                 if (res)
                         return res;
+        } else {
+                /* Downstream router could be unplugged */
+                tb_pci_port_enable(tunnel->dst_port, activate);
         }

         return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);

@@ -1067,8 +1086,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
         return 0;
 }

-static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
-                           int timeout_msec)
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
 {
         ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
         struct tb_port *in = tunnel->src_port;

@@ -1087,15 +1105,13 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
                         return ret;

                 if (val & DP_COMMON_CAP_DPRX_DONE) {
-                        *rate = tb_dp_cap_get_rate(val);
-                        *lanes = tb_dp_cap_get_lanes(val);
-
                         tb_tunnel_dbg(tunnel, "DPRX read done\n");
                         return 0;
                 }
                 usleep_range(100, 150);
         } while (ktime_before(ktime_get(), timeout));

+        tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
         return -ETIMEDOUT;
 }

@@ -1110,6 +1126,7 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
         switch (cap) {
         case DP_LOCAL_CAP:
         case DP_REMOTE_CAP:
+        case DP_COMMON_CAP:
                 break;

         default:

@@ -1182,7 +1199,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                  * reduced one). Otherwise return the remote (possibly
                  * reduced) caps.
                  */
-                ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
+                ret = tb_dp_wait_dprx(tunnel, 150);
                 if (ret) {
                         if (ret == -ETIMEDOUT)
                                 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,

@@ -1190,6 +1207,9 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                         if (ret)
                                 return ret;
                 }
+                ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+                if (ret)
+                        return ret;
         } else if (sw->generation >= 2) {
                 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
                 if (ret)

@@ -1313,8 +1333,6 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
                       "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                       rate, lanes, tb_dp_bandwidth(rate, lanes));

-        out = tunnel->dst_port;
-
         if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
                          out->cap_adap + DP_LOCAL_CAP, 1))
                 return;

@@ -1462,6 +1462,11 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
                         tb_port_disable(port->dual_link_port);
         }

+        dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
+                xd->link_speed);
+        dev_dbg(&xd->dev, "current link width %s\n",
+                tb_width_name(xd->link_width));
+
         if (device_add(&xd->dev)) {
                 dev_err(&xd->dev, "failed to add XDomain device\n");
                 return -ENODEV;

@@ -1895,6 +1900,50 @@ struct device_type tb_xdomain_type = {
 };
 EXPORT_SYMBOL_GPL(tb_xdomain_type);

+static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down)
+{
+        if (!down->dual_link_port)
+                return;
+
+        /*
+         * Gen 4 links come up already as bonded so only update the port
+         * structures here.
+         */
+        if (tb_port_get_link_generation(down) >= 4) {
+                down->bonded = true;
+                down->dual_link_port->bonded = true;
+        } else {
+                xd->bonding_possible = true;
+        }
+}
+
+static void tb_xdomain_link_exit(struct tb_xdomain *xd)
+{
+        struct tb_port *down = tb_xdomain_downstream_port(xd);
+
+        if (!down->dual_link_port)
+                return;
+
+        if (tb_port_get_link_generation(down) >= 4) {
+                down->bonded = false;
+                down->dual_link_port->bonded = false;
+        } else if (xd->link_width > TB_LINK_WIDTH_SINGLE) {
+                /*
+                 * Just return port structures back to way they were and
+                 * update credits. No need to update userspace because
+                 * the XDomain is removed soon anyway.
+                 */
+                tb_port_lane_bonding_disable(down);
+                tb_port_update_credits(down);
+        } else if (down->dual_link_port) {
+                /*
+                 * Re-enable the lane 1 adapter we disabled at the end
+                 * of tb_xdomain_get_properties().
+                 */
+                tb_port_enable(down->dual_link_port);
+        }
+}
+
 /**
  * tb_xdomain_alloc() - Allocate new XDomain object
  * @tb: Domain where the XDomain belongs

@@ -1945,7 +1994,8 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
                         goto err_free_local_uuid;
         } else {
                 xd->needs_uuid = true;
-                xd->bonding_possible = !!down->dual_link_port;
+
+                tb_xdomain_link_init(xd, down);
         }

         device_initialize(&xd->dev);

@@ -2014,6 +2064,8 @@ void tb_xdomain_remove(struct tb_xdomain *xd)

         device_for_each_child_reverse(&xd->dev, xd, unregister_service);

+        tb_xdomain_link_exit(xd);
+
         /*
          * Undo runtime PM here explicitly because it is possible that
          * the XDomain was never added to the bus and thus device_del()