drm/dp_mst: Remove single tx msg restriction.
Now that we can support multiple simultaneous replies, remove the
restrictions placed on sending new tx msgs.
This patch essentially just reverts commit 5a64967a2f ("drm/dp_mst: Have
DP_Tx send one msg at a time") now that the problem is solved in a
different way.
Cc: Wayne Lin <Wayne.Lin@amd.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Wayne Lin <waynelin@amd.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20200213211523.156998-4-sean@poorly.run
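
For illustration, the queuing-gate change at the heart of this patch can be sketched as a before/after; this is a simplified approximation (locking and list handling reduced to the essentials), not the kernel source itself:

	/* Sketch only: before this patch, a new down request was held back
	 * whenever any reply was still outstanding. */
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq) &&
	    !mgr->is_waiting_for_dwn_reply)	/* old: global serialization */
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);

	/* Sketch only: after this patch, replies are demultiplexed through
	 * mstb->tx_slots[seqno], so the global flag and its gate go away. */
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);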
commit 6bb0942e8f
parent fbc821c4a5
drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1205,8 +1205,6 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
-		mgr->is_waiting_for_dwn_reply = false;
-
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1216,7 +1214,6 @@ out:
 	}
 	mutex_unlock(&mgr->qlock);
 
-	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
 
@@ -2796,11 +2793,9 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
-		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2840,8 +2835,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq) &&
-	    !mgr->is_waiting_for_dwn_reply)
+	if (list_is_singular(&mgr->tx_msg_downq))
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -3828,7 +3822,6 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[seqno] = NULL;
-	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
@@ -3836,9 +3829,6 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	return 0;
 
 out_clear_reply:
-	mutex_lock(&mgr->qlock);
-	mgr->is_waiting_for_dwn_reply = false;
-	mutex_unlock(&mgr->qlock);
 	if (msg)
 		memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
 out:
@@ -4696,7 +4686,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
+	if (!list_empty(&mgr->tx_msg_downq))
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }

include/drm/drm_dp_mst_helper.h
@@ -590,11 +590,6 @@ struct drm_dp_mst_topology_mgr {
 	 */
 	bool payload_id_table_cleared : 1;
 
-	/**
-	 * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
-	 */
-	bool is_waiting_for_dwn_reply : 1;
-
 	/**
 	 * @mst_primary: Pointer to the primary/first branch device.
 	 */
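
To see why the serialization flag is unnecessary, it may help to sketch how a down reply is matched back to its request in drm_dp_mst_handle_down_rep(); the following is a simplified reconstruction based only on the hunks above (error paths omitted), not the full kernel function:

	/* Simplified sketch: each branch device keeps per-seqno tx_slots, so
	 * several in-flight messages can each find their reply without a
	 * global "one message at a time" gate. */
	struct drm_dp_sideband_msg_tx *txmsg = mstb->tx_slots[seqno];

	mutex_lock(&mgr->qlock);
	txmsg->state = DRM_DP_SIDEBAND_TX_RX;	/* reply has arrived */
	mstb->tx_slots[seqno] = NULL;		/* free the slot for the next msg */
	mutex_unlock(&mgr->qlock);

	/* Wake any thread blocked in drm_dp_mst_wait_tx_reply(). */
	wake_up_all(&mgr->tx_waitq);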