[PATCH 3/3] drm/dp_mst: Remove single tx msg restriction.

Lin, Wayne Wayne.Lin at amd.com
Fri Feb 14 05:57:59 UTC 2020



Hi Paul,

Thanks for the mail!

I originally solved this problem by restricting transmission to one message at a time, because of hub/dock compatibility problems.
From my experience, some branch devices don't handle interleaved replies well (a dock from HP, I think).
As a result, and correct me if I'm wrong, I believe most GPU vendors now send only one down request at a time in the Windows environment.
I would suggest keeping the original solution :) A sketch of what that policy does follows below.
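
To make the tradeoff concrete, here is a minimal userspace sketch (not the kernel code; the struct and helpers are simplified stand-ins) of the one-outstanding-request policy from 5a64967a2f3b: a down request is only transmitted while no reply is pending, so a branch device never sees interleaved transactions.

/*
 * Minimal userspace model (NOT kernel code) of the "one message at a
 * time" policy: a new down request is only transmitted when no reply
 * is outstanding.
 */
#include <stdbool.h>
#include <stdio.h>

struct mgr {
	bool is_waiting_for_dwn_reply;	/* set after tx, cleared on rx */
	int queued;			/* pending down requests */
};

static void try_send(struct mgr *mgr)
{
	if (mgr->queued > 0 && !mgr->is_waiting_for_dwn_reply) {
		mgr->queued--;
		mgr->is_waiting_for_dwn_reply = true;
		printf("tx: sent one down request, now waiting\n");
	}
}

static void handle_down_reply(struct mgr *mgr)
{
	mgr->is_waiting_for_dwn_reply = false;
	printf("rx: reply complete\n");
	try_send(mgr);	/* kick the next queued request, if any */
}

int main(void)
{
	struct mgr mgr = { .queued = 2 };

	try_send(&mgr);		 /* first request goes out */
	try_send(&mgr);		 /* blocked: still waiting on a reply */
	handle_down_reply(&mgr); /* reply arrives; second request goes out */
	handle_down_reply(&mgr);
	return 0;
}

For contrast, a sketch of the seqno-based reply matching that the patch below relies on to allow two transactions in flight is at the end of this mail.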

Thanks!
> -----Original Message-----
> From: Sean Paul <sean at poorly.run>
> Sent: Friday, February 14, 2020 5:15 AM
> To: dri-devel at lists.freedesktop.org
> Cc: lyude at redhat.com; Lin, Wayne <Wayne.Lin at amd.com>; Sean Paul
> <seanpaul at chromium.org>; Maarten Lankhorst
> <maarten.lankhorst at linux.intel.com>; Maxime Ripard <mripard at kernel.org>;
> David Airlie <airlied at linux.ie>
> Subject: [PATCH 3/3] drm/dp_mst: Remove single tx msg restriction.
> 
> From: Sean Paul <seanpaul at chromium.org>
> 
> Now that we can support multiple simultaneous replies, remove the
> restrictions placed on sending new tx msgs.
> 
> This patch essentially just reverts commit
>   5a64967a2f3b ("drm/dp_mst: Have DP_Tx send one msg at a time") now
> that the problem is solved in a different way.
> 
> Cc: Wayne Lin <Wayne.Lin at amd.com>
> Signed-off-by: Sean Paul <seanpaul at chromium.org>
> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 14 ++------------
>  include/drm/drm_dp_mst_helper.h       |  6 ------
>  2 files changed, 2 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 7e6a82efdfc02..cbf0bb0ddeb84 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -1203,8 +1203,6 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
>  		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
>  			mstb->tx_slots[txmsg->seqno] = NULL;
>  		}
> -		mgr->is_waiting_for_dwn_reply = false;
> -
>  	}
>  out:
>  	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
> @@ -1214,7 +1212,6 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
>  	}
>  	mutex_unlock(&mgr->qlock);
> 
> -	drm_dp_mst_kick_tx(mgr);
>  	return ret;
>  }
> 
> @@ -2797,11 +2794,9 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
>  	ret = process_single_tx_qlock(mgr, txmsg, false);
>  	if (ret == 1) {
>  		/* txmsg is sent it should be in the slots now */
> -		mgr->is_waiting_for_dwn_reply = true;
>  		list_del(&txmsg->next);
>  	} else if (ret) {
>  		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
> -		mgr->is_waiting_for_dwn_reply = false;
>  		list_del(&txmsg->next);
>  		if (txmsg->seqno != -1)
>  			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
> @@ -2841,8 +2836,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
>  		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
>  	}
> 
> -	if (list_is_singular(&mgr->tx_msg_downq) &&
> -	    !mgr->is_waiting_for_dwn_reply)
> +	if (list_is_singular(&mgr->tx_msg_downq))
>  		process_single_down_tx_qlock(mgr);
>  	mutex_unlock(&mgr->qlock);
>  }
> @@ -3822,7 +3816,6 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
>  	mutex_lock(&mgr->qlock);
>  	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
>  	mstb->tx_slots[seqno] = NULL;
> -	mgr->is_waiting_for_dwn_reply = false;
>  	mutex_unlock(&mgr->qlock);
> 
>  	wake_up_all(&mgr->tx_waitq);
> @@ -3830,9 +3823,6 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
>  	return 0;
> 
>  out_clear_reply:
> -	mutex_lock(&mgr->qlock);
> -	mgr->is_waiting_for_dwn_reply = false;
> -	mutex_unlock(&mgr->qlock);
>  	if (msg)
>  		memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
>  out:
> @@ -4670,7 +4660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
>  	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
> 
>  	mutex_lock(&mgr->qlock);
> -	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
> +	if (!list_empty(&mgr->tx_msg_downq))
>  		process_single_down_tx_qlock(mgr);
>  	mutex_unlock(&mgr->qlock);
>  }
> diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
> index 7d0341f94ce1b..fcc30e64c8e7e 100644
> --- a/include/drm/drm_dp_mst_helper.h
> +++ b/include/drm/drm_dp_mst_helper.h
> @@ -619,12 +619,6 @@ struct drm_dp_mst_topology_mgr {
>  	 * &drm_dp_sideband_msg_tx.state once they are queued
>  	 */
>  	struct mutex qlock;
> -
> -	/**
> -	 * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
> -	 */
> -	bool is_waiting_for_dwn_reply;
> -
>  	/**
>  	 * @tx_msg_downq: List of pending down replies.
>  	 */
> --
> Sean Paul, Software Engineer, Google / Chromium OS
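
For completeness, here is a minimal userspace sketch of the seqno-based reply matching the patch relies on. The two-slot layout mirrors the mstb->tx_slots[seqno] array visible in the diff above; everything else (struct names, request strings, helpers) is simplified for illustration.

/*
 * Minimal userspace model (NOT kernel code) of matching replies back
 * to requests by sequence number, which is what allows more than one
 * sideband transaction in flight.
 */
#include <stdio.h>
#include <stddef.h>

#define NUM_SLOTS 2	/* sideband seqno is a single bit: 0 or 1 */

struct txmsg {
	int seqno;
	const char *req;
};

static struct txmsg *tx_slots[NUM_SLOTS];

/* Claim a free slot; return its seqno, or -1 if both are in flight. */
static int send_msg(struct txmsg *msg, const char *req)
{
	for (int i = 0; i < NUM_SLOTS; i++) {
		if (!tx_slots[i]) {
			msg->seqno = i;
			msg->req = req;
			tx_slots[i] = msg;
			printf("tx: %s with seqno %d\n", req, i);
			return i;
		}
	}
	return -1;
}

/*
 * A reply carries the seqno, so it finds its request even when the
 * replies for the two outstanding transactions arrive interleaved.
 */
static void handle_reply(int seqno)
{
	struct txmsg *msg = tx_slots[seqno];

	if (!msg)
		return;
	printf("rx: reply for %s (seqno %d)\n", msg->req, seqno);
	tx_slots[seqno] = NULL;
}

int main(void)
{
	struct txmsg a, b;

	send_msg(&a, "LINK_ADDRESS");
	send_msg(&b, "ENUM_PATH_RESOURCES");
	handle_reply(1);	/* replies may come back out of order */
	handle_reply(0);
	return 0;
}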
--
Wayne Lin

