[PATCH v2] drm/dp_mst: Clear MSG_RDY flag before sending new message

Lin, Wayne Wayne.Lin at amd.com
Tue May 16 09:29:28 UTC 2023


[Public]

Hi,

Ping again for code review. Much appreciated!

Regards,
Wayne

> -----Original Message-----
> From: Lin, Wayne
> Sent: Monday, May 8, 2023 5:49 PM
> To: lyude at redhat.com; jani.nikula at intel.com; dri-
> devel at lists.freedesktop.org; amd-gfx at lists.freedesktop.org
> Cc: ville.syrjala at linux.intel.com; imre.deak at intel.com; Wentland, Harry
> <Harry.Wentland at amd.com>; Zuo, Jerry <Jerry.Zuo at amd.com>;
> stable at vger.kernel.org
> Subject: RE: [PATCH v2] drm/dp_mst: Clear MSG_RDY flag before sending
> new message
> 
> [Public]
> 
> Hi Lyude and Jani,
> 
> Could you help to review please? Thanks for your time!
> 
> Regards,
> Wayne Lin
> > -----Original Message-----
> > From: Wayne Lin <Wayne.Lin at amd.com>
> > Sent: Thursday, April 27, 2023 3:29 PM
> > To: dri-devel at lists.freedesktop.org; amd-gfx at lists.freedesktop.org
> > Cc: lyude at redhat.com; ville.syrjala at linux.intel.com;
> > jani.nikula at intel.com; imre.deak at intel.com; Wentland, Harry
> > <Harry.Wentland at amd.com>; Zuo, Jerry <Jerry.Zuo at amd.com>; Lin,
> Wayne
> > <Wayne.Lin at amd.com>; stable at vger.kernel.org
> > Subject: [PATCH v2] drm/dp_mst: Clear MSG_RDY flag before sending new
> > message
> >
> > [Why]
> > The sequence for collecting down_reply from source perspective should
> > be:
> >
> > Request_n->repeat (get partial reply of Request_n->clear message ready
> > flag to ack DPRX that the message is received) till all partial
> > replies for Request_n are received->new Request_n+1.
> >
> > Now there is a chance that drm_dp_mst_hpd_irq() will fire a new down
> > request in the tx queue while the down reply is still incomplete. The
> > source is not allowed to generate interleaved message transactions, so
> > we should avoid that.
> >
> > Also, while assembling partial reply packets, reading out DPCD
> > DOWN_REP Sideband MSG buffer + clearing DOWN_REP_MSG_RDY flag
> should
> > be wrapped up as a complete operation for reading out a reply packet.
> > Kicking off a new request before clearing DOWN_REP_MSG_RDY flag might
> > be risky. e.g. If the reply of the new request has overwritten the
> > DPRX DOWN_REP Sideband MSG buffer before source writing one to clear
> > DOWN_REP_MSG_RDY flag, source then unintentionally flushes the reply
> > for the new request. Should handle the up request in the same way.
> >
> > [How]
> > Separate drm_dp_mst_hpd_irq() into 2 steps. After acking the MST IRQ
> > event, the driver calls drm_dp_mst_hpd_irq_step2(), which might trigger
> > drm_dp_mst_kick_tx() only when there is no ongoing message transaction.
> >
> > Changes since v1:
> > * Reworked on review comments received
> > -> Adjust the fix to let driver explicitly kick off new down request
> > when mst irq event is handled and acked
> > -> Adjust the commit message
> >
> > Signed-off-by: Wayne Lin <Wayne.Lin at amd.com>
> > Cc: stable at vger.kernel.org
> > ---
> >  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  8 ++---
> > drivers/gpu/drm/display/drm_dp_mst_topology.c | 35
> > ++++++++++++++++---
> >  drivers/gpu/drm/i915/display/intel_dp.c       |  5 ++-
> >  drivers/gpu/drm/nouveau/dispnv50/disp.c       |  5 ++-
> >  include/drm/display/drm_dp_mst_helper.h       |  4 +--
> >  5 files changed, 45 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > index 1ad67c2a697e..48bdcb2ee9b1 100644
> > --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > @@ -3259,10 +3259,9 @@ static void
> dm_handle_mst_sideband_msg(struct
> > amdgpu_dm_connector *aconnector)
> >  		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1],
> esi[2]);
> >  		/* handle HPD short pulse irq */
> >  		if (aconnector->mst_mgr.mst_state)
> > -			drm_dp_mst_hpd_irq(
> > -				&aconnector->mst_mgr,
> > -				esi,
> > -				&new_irq_handled);
> > +			drm_dp_mst_hpd_irq_step1(&aconnector-
> > >mst_mgr,
> > +						 esi,
> > +						 &new_irq_handled);
> >
> >  		if (new_irq_handled) {
> >  			/* ACK at DPCD to notify down stream */ @@ -3281,6
> > +3280,7 @@ static void dm_handle_mst_sideband_msg(struct
> > amdgpu_dm_connector *aconnector)
> >  					break;
> >  			}
> >
> > +			drm_dp_mst_hpd_irq_step2(&aconnector-
> > >mst_mgr);
> >  			/* check if there is new irq to be handled */
> >  			dret = drm_dp_dpcd_read(
> >  				&aconnector->dm_dp_aux.aux,
> > diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c
> > b/drivers/gpu/drm/display/drm_dp_mst_topology.c
> > index 70df29fe92db..2e0a38a6509c 100644
> > --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
> > +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
> > @@ -4045,7 +4045,7 @@ static int drm_dp_mst_handle_up_req(struct
> > drm_dp_mst_topology_mgr *mgr)  }
> >
> >  /**
> > - * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
> > + * drm_dp_mst_hpd_irq_step1() - MST hotplug IRQ notify
> >   * @mgr: manager to notify irq for.
> >   * @esi: 4 bytes from SINK_COUNT_ESI
> >   * @handled: whether the hpd interrupt was consumed or not @@ -4055,7
> > +4055,7 @@ static int drm_dp_mst_handle_up_req(struct
> > drm_dp_mst_topology_mgr *mgr)
> >   * topology manager will process the sideband messages received as a
> result
> >   * of this.
> >   */
> > -int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8
> *esi,
> > bool *handled)
> > +int drm_dp_mst_hpd_irq_step1(struct drm_dp_mst_topology_mgr *mgr,
> > u8
> > +*esi, bool *handled)
> >  {
> >  	int ret = 0;
> >  	int sc;
> > @@ -4077,11 +4077,38 @@ int drm_dp_mst_hpd_irq(struct
> > drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
> >  		*handled = true;
> >  	}
> >
> > -	drm_dp_mst_kick_tx(mgr);
> >  	return ret;
> >  }
> > -EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
> > +EXPORT_SYMBOL(drm_dp_mst_hpd_irq_step1);
> > +
> > +/**
> > + * drm_dp_mst_hpd_irq_step2() - MST hotplug IRQ 2nd part handling
> > + * @mgr: manager to notify irq for.
> > + *
> > + * This should be called from the driver when mst irq event is
> > +handled
> > + * and acked. Note that new down request should only be sent when
> > + * previous message transaction is done. Source is not supposed to
> > +generate
> > + * interleaved message transactions.
> > + */
> > +void drm_dp_mst_hpd_irq_step2(struct drm_dp_mst_topology_mgr
> *mgr)
> > {
> > +	struct drm_dp_sideband_msg_tx *txmsg;
> > +	bool skip = false;
> >
> > +	mutex_lock(&mgr->qlock);
> > +	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
> > +					 struct drm_dp_sideband_msg_tx,
> > next);
> > +	/* If the last transaction is not completed yet */
> > +	if (!txmsg ||
> > +	    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
> > +	    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
> > +		skip = true;
> > +	mutex_unlock(&mgr->qlock);
> > +
> > +	if (!skip)
> > +		drm_dp_mst_kick_tx(mgr);
> > +}
> > +EXPORT_SYMBOL(drm_dp_mst_hpd_irq_step2);
> >  /**
> >   * drm_dp_mst_detect_port() - get connection status for an MST port
> >   * @connector: DRM connector for this port diff --git
> > a/drivers/gpu/drm/i915/display/intel_dp.c
> > b/drivers/gpu/drm/i915/display/intel_dp.c
> > index 75070eb07d4b..9a9a5aec9534 100644
> > --- a/drivers/gpu/drm/i915/display/intel_dp.c
> > +++ b/drivers/gpu/drm/i915/display/intel_dp.c
> > @@ -3803,7 +3803,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp,
> > u8 *esi, u8 *ack)  {
> >  	bool handled = false;
> >
> > -	drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
> > +	drm_dp_mst_hpd_irq_step1(&intel_dp->mst_mgr, esi, &handled);
> >  	if (handled)
> >  		ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY |
> DP_UP_REQ_MSG_RDY);
> >
> > @@ -3880,6 +3880,9 @@ intel_dp_check_mst_status(struct intel_dp
> > *intel_dp)
> >
> >  		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
> >  			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
> > +
> > +		if (ack[1] & (DP_DOWN_REP_MSG_RDY |
> > DP_UP_REQ_MSG_RDY))
> > +			drm_dp_mst_hpd_irq_step2(&intel_dp->mst_mgr);
> >  	}
> >
> >  	return link_ok;
> > diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c
> > b/drivers/gpu/drm/nouveau/dispnv50/disp.c
> > index ed9d374147b8..00c36fcc8afd 100644
> > --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
> > +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
> > @@ -1332,12 +1332,15 @@ nv50_mstm_service(struct nouveau_drm *drm,
> >  			break;
> >  		}
> >
> > -		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
> > +		drm_dp_mst_hpd_irq_step1(&mstm->mgr, esi, &handled);
> >  		if (!handled)
> >  			break;
> >
> >  		rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1,
> &esi[1],
> >  				       3);
> > +
> > +		drm_dp_mst_hpd_irq_step2(&mstm->mgr);
> > +
> >  		if (rc != 3) {
> >  			ret = false;
> >  			break;
> > diff --git a/include/drm/display/drm_dp_mst_helper.h
> > b/include/drm/display/drm_dp_mst_helper.h
> > index 32c764fb9cb5..6c08ba765d5a 100644
> > --- a/include/drm/display/drm_dp_mst_helper.h
> > +++ b/include/drm/display/drm_dp_mst_helper.h
> > @@ -815,8 +815,8 @@ void drm_dp_mst_topology_mgr_destroy(struct
> > drm_dp_mst_topology_mgr *mgr);  bool drm_dp_read_mst_cap(struct
> > drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);  int
> > drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr
> *mgr,
> > bool mst_state);
> >
> > -int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8
> *esi,
> > bool *handled);
> > -
> > +int drm_dp_mst_hpd_irq_step1(struct drm_dp_mst_topology_mgr *mgr,
> > u8
> > +*esi, bool *handled); void drm_dp_mst_hpd_irq_step2(struct
> > +drm_dp_mst_topology_mgr *mgr);
> >
> >  int
> >  drm_dp_mst_detect_port(struct drm_connector *connector,
> > --
> > 2.37.3


More information about the dri-devel mailing list