thunderbolt: Work around an IOMMU fault on certain systems with Intel Maple Ridge

On some systems the IOMMU blocks the first couple of driver ready
messages to the connection manager firmware, as can be seen in the
excerpts below:

  thunderbolt 0000:06:00.0: AMD-Vi: Event logged [IO_PAGE_FAULT domain=0x0010 address=0xbb0e3400 flags=0x0020]

or

  DMAR: DRHD: handling fault status reg 2
  DMAR: [DMA Write] Request device [04:00.0] PASID ffffffff fault addr 69974000 [fault reason 05] PTE Write access is not set

The reason is unknown and hard to debug because we were not able to
reproduce this locally. It only happens on certain systems with the
Intel Maple Ridge Thunderbolt controller, and only when nothing is
connected at driver load time (so typically when the system is booted
up); if a device is connected when the driver is loaded, the issue does
not happen.

We can work around this by sending the driver ready message several
times. After a couple of retries the message goes through and the
controller works just fine. For this reason, make the number of retries
a parameter of icm_request() and then, for Maple Ridge (and Titan
Ridge, as they use the same function, but this should not matter),
increase the number of retries while shortening the per-attempt timeout
accordingly.

Reported-by: Werner Sembach <wse@tuxedocomputers.com>
Reported-by: Konrad J Hambrick <kjhambrick@gmail.com>
Reported-by: Calvin Walton <calvin.walton@kepstin.ca>
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=214259
Cc: stable@vger.kernel.org
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
---

diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -41,6 +41,7 @@
 #define PHY_PORT_CS1_LINK_STATE_SHIFT	26
 #define ICM_TIMEOUT			5000	/* ms */
+#define ICM_RETRIES			3
 #define ICM_APPROVE_TIMEOUT		10000	/* ms */
 #define ICM_MAX_LINK			4
@@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
 static int icm_request(struct tb *tb, const void *request, size_t request_size,
 		       void *response, size_t response_size, size_t npackets,
-		       unsigned int timeout_msec)
+		       int retries, unsigned int timeout_msec)
 {
 	struct icm *icm = tb_priv(tb);
-	int retries = 3;
 	do {
 		struct tb_cfg_request *req;
@@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
 		return -ENOMEM;
 	ret = icm_request(tb, &request, sizeof(request), switches,
-			  sizeof(*switches), npackets, ICM_TIMEOUT);
+			  sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		goto err_free;
@@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
 	memset(&reply, 0, sizeof(reply));
 	/* Use larger timeout as establishing tunnels can take some time */
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_APPROVE_TIMEOUT);
+			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
 	if (ret)
 		return ret;
@@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, 20000);
+			  1, 10, 2000);
 	if (ret)
 		return ret;
@@ -1053,7 +1053,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_APPROVE_TIMEOUT);
+			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1081,7 +1081,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1110,7 +1110,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1144,7 +1144,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1170,7 +1170,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1496,7 +1496,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1522,7 +1522,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1543,7 +1543,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1604,7 +1604,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1626,7 +1626,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, 20000);
+			  1, ICM_RETRIES, 20000);
 	if (ret)
 		return ret;
@@ -2298,7 +2298,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
+			  1, ICM_RETRIES, ICM_TIMEOUT);
 	if (ret)
 		return ret;
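
A minimal, self-contained sketch of the retry pattern this patch introduces
follows. It is not the driver code: send_once() and request_with_retries()
are invented stand-ins for the real ICM transaction, but the loop shape
matches what icm_request() now does with its new retries argument, and the
numbers mirror the Titan/Maple Ridge driver ready change (10 attempts of
2000 ms instead of one 20000 ms attempt, keeping roughly the same overall
budget).

/* Hypothetical user-space illustration only -- not drivers/thunderbolt/icm.c. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for one firmware transaction; pretend the first attempts are lost. */
static bool send_once(unsigned int timeout_msec)
{
	static int attempt;

	(void)timeout_msec;
	return ++attempt > 3;	/* first three messages never get a reply */
}

/* Same shape as the reworked request loop: retry with a shorter timeout. */
static int request_with_retries(int retries, unsigned int timeout_msec)
{
	do {
		if (send_once(timeout_msec))
			return 0;	/* got a reply */
		/* this attempt timed out, send the request again */
	} while (--retries);

	return -ETIMEDOUT;
}

int main(void)
{
	/* 10 x 2000 ms instead of 1 x 20000 ms: same budget, more chances. */
	if (request_with_retries(10, 2000))
		printf("driver ready failed\n");
	else
		printf("driver ready succeeded\n");
	return 0;
}

In the patch itself this corresponds to icm_tr_driver_ready() passing 10
retries of 2000 ms to icm_request(), while the other callers keep their
previous behavior via ICM_RETRIES (3) and their existing timeouts.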