diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index d8b9c734abd3..56790d50f9e3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, 10, 2000);
+			  1, 10, 250);
 	if (ret)
 		return ret;
 
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 4b7bec74e89f..fb4f46e51753 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1517,6 +1517,10 @@ static struct pci_device_id nhi_ids[] = {
 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 0f029ce75882..7a07c7c1a9c2 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -90,6 +90,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
 #define PCI_DEVICE_ID_INTEL_TGL_H_NHI1		0x9a21
 #define PCI_DEVICE_ID_INTEL_RPL_NHI0		0xa73e
 #define PCI_DEVICE_ID_INTEL_RPL_NHI1		0xa76d
+#define PCI_DEVICE_ID_INTEL_LNL_NHI0		0xa833
+#define PCI_DEVICE_ID_INTEL_LNL_NHI1		0xa834
 
 #define PCI_CLASS_SERIAL_USB_USB4		0x0c0340
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 44e9b09de47a..900114ba4371 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -941,22 +941,6 @@ int tb_port_get_link_generation(struct tb_port *port)
 	}
 }
 
-static const char *width_name(enum tb_link_width width)
-{
-	switch (width) {
-	case TB_LINK_WIDTH_SINGLE:
-		return "symmetric, single lane";
-	case TB_LINK_WIDTH_DUAL:
-		return "symmetric, dual lanes";
-	case TB_LINK_WIDTH_ASYM_TX:
-		return "asymmetric, 3 transmitters, 1 receiver";
-	case TB_LINK_WIDTH_ASYM_RX:
-		return "asymmetric, 3 receivers, 1 transmitter";
-	default:
-		return "unknown";
-	}
-}
-
 /**
  * tb_port_get_link_width() - Get current link width
  * @port: Port to check (USB4 or CIO)
@@ -2769,7 +2753,7 @@ static void tb_switch_link_init(struct tb_switch *sw)
 		return;
 
 	tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
-	tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
+	tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width));
 
 	bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
 
@@ -2789,6 +2773,19 @@ static void tb_switch_link_init(struct tb_switch *sw)
 	if (down->dual_link_port)
 		down->dual_link_port->bonded = bonded;
 	tb_port_update_credits(down);
+
+	if (tb_port_get_link_generation(up) < 4)
+		return;
+
+	/*
+	 * Set the Gen 4 preferred link width. This is what the router
+	 * prefers when the link is brought up. If the router does not
+	 * support asymmetric link configuration, this will also be set
+	 * to TB_LINK_WIDTH_DUAL.
+	 */
+	sw->preferred_link_width = sw->link_width;
+	tb_sw_dbg(sw, "preferred link width %s\n",
+		  tb_width_name(sw->preferred_link_width));
 }
 
 /**
@@ -3029,7 +3026,7 @@ int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
 
 	tb_switch_update_link_attributes(sw);
 
-	tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
+	tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width));
 
 	return ret;
 }
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index fd49f86e0353..846d2813bb1a 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -513,8 +513,6 @@ static void tb_port_unconfigure_xdomain(struct tb_port *port)
 		usb4_port_unconfigure_xdomain(port);
 	else
 		tb_lc_unconfigure_xdomain(port);
-
-	tb_port_enable(port->dual_link_port);
 }
 
 static void tb_scan_xdomain(struct tb_port *port)
@@ -1087,15 +1085,14 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
 			     struct tb_port *dst_port, int requested_up,
 			     int requested_down)
 {
+	bool clx = false, clx_disabled = false, downstream;
 	struct tb_switch *sw;
-	bool clx, downstream;
 	struct tb_port *up;
 	int ret = 0;
 
 	if (!asym_threshold)
 		return 0;
 
-	/* Disable CL states before doing any transitions */
 	downstream = tb_port_path_direction_downstream(src_port, dst_port);
 	/* Pick up router deepest in the hierarchy */
 	if (downstream)
@@ -1103,11 +1100,10 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
 	else
 		sw = src_port->sw;
 
-	clx = tb_disable_clx(sw);
-
 	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+		struct tb_port *down = tb_switch_downstream_port(up->sw);
+		enum tb_link_width width_up, width_down;
 		int consumed_up, consumed_down;
-		enum tb_link_width width;
 
 		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
 					       &consumed_up, &consumed_down);
@@ -1128,7 +1124,8 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
 			if (consumed_down + requested_down < asym_threshold)
 				continue;
 
-			width = TB_LINK_WIDTH_ASYM_RX;
+			width_up = TB_LINK_WIDTH_ASYM_RX;
+			width_down = TB_LINK_WIDTH_ASYM_TX;
 		} else {
 			/* Upstream, the opposite of above */
 			if (consumed_down + requested_down >= TB_ASYM_MIN) {
@@ -1138,22 +1135,34 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
 			if (consumed_up + requested_up < asym_threshold)
 				continue;
 
-			width = TB_LINK_WIDTH_ASYM_TX;
+			width_up = TB_LINK_WIDTH_ASYM_TX;
+			width_down = TB_LINK_WIDTH_ASYM_RX;
 		}
 
-		if (up->sw->link_width == width)
+		if (up->sw->link_width == width_up)
 			continue;
 
-		if (!tb_port_width_supported(up, width))
+		if (!tb_port_width_supported(up, width_up) ||
+		    !tb_port_width_supported(down, width_down))
 			continue;
 
+		/*
+		 * Disable CL states before doing any transitions. We
+		 * delayed it until now, when we know there is a real
+		 * transition taking place.
+		 */
+		if (!clx_disabled) {
+			clx = tb_disable_clx(sw);
+			clx_disabled = true;
+		}
+
 		tb_sw_dbg(up->sw, "configuring asymmetric link\n");
 
 		/*
 		 * Here requested + consumed > threshold so we need to
 		 * transition the link into asymmetric now.
 		 */
-		ret = tb_switch_set_link_width(up->sw, width);
+		ret = tb_switch_set_link_width(up->sw, width_up);
 		if (ret) {
 			tb_sw_warn(up->sw, "failed to set link width\n");
 			break;
 		}
@@ -1174,24 +1183,24 @@
  * @dst_port: Destination adapter
  * @requested_up: New lower bandwidth request upstream (Mb/s)
  * @requested_down: New lower bandwidth request downstream (Mb/s)
+ * @keep_asym: Keep asymmetric link if preferred
  *
  * Goes over each link from @src_port to @dst_port and tries to
  * transition the link to symmetric if the currently consumed bandwidth
- * allows.
+ * allows, unless @keep_asym is %true and the link prefers to stay asymmetric.
  */
 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
 			    struct tb_port *dst_port, int requested_up,
-			    int requested_down)
+			    int requested_down, bool keep_asym)
 {
+	bool clx = false, clx_disabled = false, downstream;
 	struct tb_switch *sw;
-	bool clx, downstream;
 	struct tb_port *up;
 	int ret = 0;
 
 	if (!asym_threshold)
 		return 0;
 
-	/* Disable CL states before doing any transitions */
 	downstream = tb_port_path_direction_downstream(src_port, dst_port);
 	/* Pick up router deepest in the hierarchy */
 	if (downstream)
@@ -1199,8 +1208,6 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
 	else
 		sw = src_port->sw;
 
-	clx = tb_disable_clx(sw);
-
 	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
 		int consumed_up, consumed_down;
 
@@ -1233,6 +1240,25 @@
 		if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
 			continue;
 
+		/*
+		 * Here consumed < threshold so we can transition the
+		 * link to symmetric.
+		 *
+		 * However, if the router prefers an asymmetric link we
+		 * honor that (unless @keep_asym is %false).
+		 */
+		if (keep_asym &&
+		    up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
+			tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
+			continue;
+		}
+
+		/* Disable CL states before doing any transitions */
+		if (!clx_disabled) {
+			clx = tb_disable_clx(sw);
+			clx_disabled = true;
+		}
+
 		tb_sw_dbg(up->sw, "configuring symmetric link\n");
 
 		ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
@@ -1280,7 +1306,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
 		struct tb_port *host_port;
 
 		host_port = tb_port_at(tb_route(sw), tb->root_switch);
-		tb_configure_sym(tb, host_port, up, 0, 0);
+		tb_configure_sym(tb, host_port, up, 0, 0, false);
 	}
 
 	/* Set the link configured */
@@ -1465,7 +1491,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
 		 * If bandwidth on a link is < asym_threshold
 		 * transition the link to symmetric.
 		 */
-		tb_configure_sym(tb, src_port, dst_port, 0, 0);
+		tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
 		/* Now we can allow the domain to runtime suspend again */
 		pm_runtime_mark_last_busy(&dst_port->sw->dev);
 		pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1901,7 +1927,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
 			return;
 	}
 
-	tb_port_dbg(port, "DP %s resource available\n",
+	tb_port_dbg(port, "DP %s resource available after hotplug\n",
 		    tb_port_is_dpin(port) ? "IN" : "OUT");
 	list_add_tail(&port->list, &tcm->dp_resources);
@@ -2287,7 +2313,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
 	 * If bandwidth on a link is < asym_threshold transition
 	 * the link to symmetric.
 	 */
-	tb_configure_sym(tb, in, out, *requested_up, *requested_down);
+	tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
 	/*
 	 * If requested bandwidth is less than or equal to what is
 	 * currently allocated to that tunnel we simply change
@@ -2330,7 +2356,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
 		ret = tb_configure_asym(tb, in, out, *requested_up,
 					*requested_down);
 		if (ret) {
-			tb_configure_sym(tb, in, out, 0, 0);
+			tb_configure_sym(tb, in, out, 0, 0, true);
 			return ret;
 		}
 
@@ -2338,7 +2364,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
 						requested_down);
 		if (ret) {
 			tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
-			tb_configure_sym(tb, in, out, 0, 0);
+			tb_configure_sym(tb, in, out, 0, 0, true);
 		}
 	} else {
 		ret = -ENOBUFS;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index e299e53473ae..997c5a536905 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -125,6 +125,7 @@ struct tb_switch_tmu {
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the upstream facing link
+ * @preferred_link_width: Router preferred link width (only set for Gen 4 links)
 * @link_usb4: Upstream link is USB4
 * @generation: Switch Thunderbolt generation
 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
@@ -178,6 +179,7 @@ struct tb_switch {
 	const char *device_name;
 	unsigned int link_speed;
 	enum tb_link_width link_width;
+	enum tb_link_width preferred_link_width;
 	bool link_usb4;
 	unsigned int generation;
 	int cap_plug_events;
@@ -568,6 +570,22 @@ static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
 	return &sw->ports[port];
 }
 
+static inline const char *tb_width_name(enum tb_link_width width)
+{
+	switch (width) {
+	case TB_LINK_WIDTH_SINGLE:
+		return "symmetric, single lane";
+	case TB_LINK_WIDTH_DUAL:
+		return "symmetric, dual lanes";
+	case TB_LINK_WIDTH_ASYM_TX:
+		return "asymmetric, 3 transmitters, 1 receiver";
+	case TB_LINK_WIDTH_ASYM_RX:
+		return "asymmetric, 3 receivers, 1 transmitter";
+	default:
+		return "unknown";
+	}
+}
+
 /**
 * tb_port_has_remote() - Does the port have switch connected downstream
 * @port: Port to check
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 11f2aec2a5d3..9a259c72e5a7 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -894,7 +894,7 @@ static int tb_switch_tmu_change_mode(struct tb_switch *sw)
 
 	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
 	if (ret)
-		return ret;
+		goto out;
 
 	/* Program the new mode and the downstream router lane adapter */
 	switch (sw->tmu.mode_request) {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 7534cd3a81f4..6fffb2c82d3d 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -173,16 +173,28 @@ static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
 	int ret;
 
 	/* Only supported if both routers are at least USB4 v2 */
-	if (tb_port_get_link_generation(port) < 4)
+	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
+	    (usb4_switch_version(tunnel->dst_port->sw) < 2))
+		return 0;
+
+	if (enable && tb_port_get_link_generation(port) < 4)
 		return 0;
 
 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
 	if (ret)
 		return ret;
 
+	/*
+	 * The downstream router could be unplugged, so disabling the
+	 * encapsulation only in the upstream router is still possible.
+	 */
 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
-	if (ret)
-		return ret;
+	if (ret) {
+		if (enable)
+			return ret;
+		if (ret != -ENODEV)
+			return ret;
+	}
 
 	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
 		      str_enabled_disabled(enable));
@@ -199,14 +211,21 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
 			return res;
 	}
 
-	res = tb_pci_port_enable(tunnel->src_port, activate);
+	if (activate)
+		res = tb_pci_port_enable(tunnel->dst_port, activate);
+	else
+		res = tb_pci_port_enable(tunnel->src_port, activate);
 	if (res)
 		return res;
-	if (tb_port_is_pcie_up(tunnel->dst_port)) {
-		res = tb_pci_port_enable(tunnel->dst_port, activate);
+
+	if (activate) {
+		res = tb_pci_port_enable(tunnel->src_port, activate);
 		if (res)
 			return res;
+	} else {
+		/* Downstream router could be unplugged */
+		tb_pci_port_enable(tunnel->dst_port, activate);
 	}
 
 	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
@@ -1067,8 +1086,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
 	return 0;
 }
 
-static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
-			   int timeout_msec)
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
 {
 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
 	struct tb_port *in = tunnel->src_port;
@@ -1087,15 +1105,13 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
 			return ret;
 
 		if (val & DP_COMMON_CAP_DPRX_DONE) {
-			*rate = tb_dp_cap_get_rate(val);
-			*lanes = tb_dp_cap_get_lanes(val);
-
 			tb_tunnel_dbg(tunnel, "DPRX read done\n");
 			return 0;
 		}
 		usleep_range(100, 150);
 	} while (ktime_before(ktime_get(), timeout));
 
+	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
 	return -ETIMEDOUT;
 }
@@ -1110,6 +1126,7 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
 	switch (cap) {
 	case DP_LOCAL_CAP:
 	case DP_REMOTE_CAP:
+	case DP_COMMON_CAP:
 		break;
 
 	default:
@@ -1182,7 +1199,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 		 * reduced one). Otherwise return the remote (possibly
 		 * reduced) caps.
 		 */
-		ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
+		ret = tb_dp_wait_dprx(tunnel, 150);
 		if (ret) {
 			if (ret == -ETIMEDOUT)
 				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
@@ -1190,6 +1207,9 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 			if (ret)
 				return ret;
 		}
+		ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+		if (ret)
+			return ret;
 	} else if (sw->generation >= 2) {
 		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
 		if (ret)
@@ -1313,8 +1333,6 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
 		   "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 		   rate, lanes, tb_dp_bandwidth(rate, lanes));
 
-	out = tunnel->dst_port;
-
 	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
 			 out->cap_adap + DP_LOCAL_CAP, 1))
 		return;
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9803f0bbf20d..9495742913d5 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1462,6 +1462,11 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
 			tb_port_disable(port->dual_link_port);
 	}
 
+	dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
+		xd->link_speed);
+	dev_dbg(&xd->dev, "current link width %s\n",
+		tb_width_name(xd->link_width));
+
 	if (device_add(&xd->dev)) {
 		dev_err(&xd->dev, "failed to add XDomain device\n");
 		return -ENODEV;
 	}
@@ -1895,6 +1900,50 @@ struct device_type tb_xdomain_type = {
 };
 EXPORT_SYMBOL_GPL(tb_xdomain_type);
 
+static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down)
+{
+	if (!down->dual_link_port)
+		return;
+
+	/*
+	 * Gen 4 links come up already bonded, so only update the port
+	 * structures here.
+	 */
+	if (tb_port_get_link_generation(down) >= 4) {
+		down->bonded = true;
+		down->dual_link_port->bonded = true;
+	} else {
+		xd->bonding_possible = true;
+	}
+}
+
+static void tb_xdomain_link_exit(struct tb_xdomain *xd)
+{
+	struct tb_port *down = tb_xdomain_downstream_port(xd);
+
+	if (!down->dual_link_port)
+		return;
+
+	if (tb_port_get_link_generation(down) >= 4) {
+		down->bonded = false;
+		down->dual_link_port->bonded = false;
+	} else if (xd->link_width > TB_LINK_WIDTH_SINGLE) {
+		/*
+		 * Just return the port structures back to the way they
+		 * were and update credits. No need to update userspace
+		 * because the XDomain is removed soon anyway.
+		 */
+		tb_port_lane_bonding_disable(down);
+		tb_port_update_credits(down);
+	} else if (down->dual_link_port) {
+		/*
+		 * Re-enable the lane 1 adapter we disabled at the end
+		 * of tb_xdomain_get_properties().
+		 */
+		tb_port_enable(down->dual_link_port);
+	}
+}
+
 /**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
@@ -1945,7 +1994,8 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
 		goto err_free_local_uuid;
 	} else {
 		xd->needs_uuid = true;
-		xd->bonding_possible = !!down->dual_link_port;
+
+		tb_xdomain_link_init(xd, down);
 	}
 
 	device_initialize(&xd->dev);
@@ -2014,6 +2064,8 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
 
 	device_for_each_child_reverse(&xd->dev, xd, unregister_service);
 
+	tb_xdomain_link_exit(xd);
+
 	/*
 	 * Undo runtime PM here explicitly because it is possible that
 	 * the XDomain was never added to the bus and thus device_del()