[Freedreno] [PATCH 14/25] drm/msm/dpu: remove enc_id tagging for hw blocks

Jeykumar Sankaran jsanka at codeaurora.org
Wed Oct 10 18:41:57 UTC 2018


On 2018-10-10 08:06, Sean Paul wrote:
> On Mon, Oct 08, 2018 at 09:27:31PM -0700, Jeykumar Sankaran wrote:
>> RM was using encoder ids to tag HW blocks to reserve
>> and retrieve later for the display pipeline. Now
>> that all the reserved HW blocks for a display are
>> maintained in its crtc state, no retrieval is needed.
>> This patch cleans up RM of encoder id tagging.
>> 
>> Signed-off-by: Jeykumar Sankaran <jsanka at codeaurora.org>
>> ---
>>  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c    | 90
> +++++++++----------------------
>>  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 28 ++++------
>>  2 files changed, 36 insertions(+), 82 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
>> index 303f1b3..a8461b8 100644
>> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
>> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
>> @@ -21,9 +21,6 @@
>>  #include "dpu_encoder.h"
>>  #include "dpu_trace.h"
>> 
>> -#define RESERVED_BY_OTHER(h, r)  \
>> -		((h)->enc_id && (h)->enc_id != r)
>> -
>>  /**
>>   * struct dpu_rm_requirements - Reservation requirements parameter
> bundle
>>   * @topology:  selected topology for the display
>> @@ -38,12 +35,13 @@ struct dpu_rm_requirements {
>>  /**
>>   * struct dpu_rm_hw_blk - hardware block tracking list member
>>   * @list:	List head for list of all hardware blocks tracking items
>> - * @enc_id:	Encoder id to which this blk is binded
>> + * @in_use:     True, if the hw block is assigned to a display
> pipeline.
>> + *		False, otherwise
>>   * @hw:		Pointer to the hardware register access object for
> this block
>>   */
>>  struct dpu_rm_hw_blk {
>>  	struct list_head list;
>> -	uint32_t enc_id;
>> +	bool in_use;
> 
> How do the reservations work for TEST_ONLY commits? At a quick glance 
> it
> looks
> like they might be marked in_use?

Yes, that is a bug. I should be releasing those reservations in 
drm_crtc_destroy_state.

Thanks and Regards,
Jeykumar S.
> 
> Sean
> 
>>  	struct dpu_hw_blk *hw;
>>  };
>> 
>> @@ -51,23 +49,19 @@ struct dpu_rm_hw_blk {
>>   * struct dpu_rm_hw_iter - iterator for use with dpu_rm
>>   * @hw: dpu_hw object requested, or NULL on failure
>>   * @blk: dpu_rm internal block representation. Clients ignore. Used 
>> as
> iterator.
>> - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for 
>> Any
> Encoder
>>   * @type: Hardware Block Type client wishes to search for.
>>   */
>>  struct dpu_rm_hw_iter {
>>  	struct dpu_hw_blk *hw;
>>  	struct dpu_rm_hw_blk *blk;
>> -	uint32_t enc_id;
>>  	enum dpu_hw_blk_type type;
>>  };
>> 
>>  static void _dpu_rm_init_hw_iter(
>>  		struct dpu_rm_hw_iter *iter,
>> -		uint32_t enc_id,
>>  		enum dpu_hw_blk_type type)
>>  {
>>  	memset(iter, 0, sizeof(*iter));
>> -	iter->enc_id = enc_id;
>>  	iter->type = type;
>>  }
>> 
>> @@ -91,16 +85,12 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm 
>> *rm,
> struct dpu_rm_hw_iter *i)
>>  	i->blk = list_prepare_entry(i->blk, blk_list, list);
>> 
>>  	list_for_each_entry_continue(i->blk, blk_list, list) {
>> -		if (i->enc_id == i->blk->enc_id) {
>> +		if (!i->blk->in_use) {
>>  			i->hw = i->blk->hw;
>> -			DPU_DEBUG("found type %d id %d for enc %d\n",
>> -					i->type, i->blk->hw->id,
> i->enc_id);
>>  			return true;
>>  		}
>>  	}
>> 
>> -	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
>> -
>>  	return false;
>>  }
>> 
>> @@ -196,7 +186,6 @@ static int _dpu_rm_hw_blk_create(
>>  	}
>> 
>>  	blk->hw = hw;
>> -	blk->enc_id = 0;
>>  	list_add_tail(&blk->list, &rm->hw_blks[type]);
>> 
>>  	return 0;
>> @@ -301,7 +290,6 @@ static bool _dpu_rm_needs_split_display(const 
>> struct
> msm_display_topology *top)
>>   *	proposed use case requirements, incl. hardwired dependent blocks
> like
>>   *	pingpong
>>   * @rm: dpu resource manager handle
>> - * @enc_id: encoder id requesting for allocation
>>   * @reqs: proposed use case requirements
>>   * @lm: proposed layer mixer, function checks if lm, and all other
> hardwired
>>   *      blocks connected to the lm (pp) is available and appropriate
>> @@ -313,7 +301,6 @@ static bool _dpu_rm_needs_split_display(const 
>> struct
> msm_display_topology *top)
>>   */
>>  static bool _dpu_rm_check_lm_and_get_connected_blks(
>>  		struct dpu_rm *rm,
>> -		uint32_t enc_id,
>>  		struct dpu_rm_requirements *reqs,
>>  		struct dpu_rm_hw_blk *lm,
>>  		struct dpu_rm_hw_blk **pp,
>> @@ -339,13 +326,7 @@ static bool
> _dpu_rm_check_lm_and_get_connected_blks(
>>  		}
>>  	}
>> 
>> -	/* Already reserved? */
>> -	if (RESERVED_BY_OTHER(lm, enc_id)) {
>> -		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
>> -		return false;
>> -	}
>> -
>> -	_dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
>> +	_dpu_rm_init_hw_iter(&iter, DPU_HW_BLK_PINGPONG);
>>  	while (_dpu_rm_get_hw_locked(rm, &iter)) {
>>  		if (iter.blk->hw->id == lm_cfg->pingpong) {
>>  			*pp = iter.blk;
>> @@ -358,16 +339,10 @@ static bool
> _dpu_rm_check_lm_and_get_connected_blks(
>>  		return false;
>>  	}
>> 
>> -	if (RESERVED_BY_OTHER(*pp, enc_id)) {
>> -		DPU_DEBUG("lm %d pp %d already reserved\n", lm->hw->id,
>> -				(*pp)->hw->id);
>> -		return false;
>> -	}
>> -
>>  	return true;
>>  }
>> 
>> -static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
>> +static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
>>  			       struct dpu_crtc_state *dpu_cstate,
>>  			       struct dpu_rm_requirements *reqs)
>> 
>> @@ -384,7 +359,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
> uint32_t enc_id,
>>  	}
>> 
>>  	/* Find a primary mixer */
>> -	_dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
>> +	_dpu_rm_init_hw_iter(&iter_i, DPU_HW_BLK_LM);
>>  	while (lm_count != reqs->topology.num_lm &&
>>  			_dpu_rm_get_hw_locked(rm, &iter_i)) {
>>  		memset(&lm, 0, sizeof(lm));
>> @@ -394,14 +369,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm 
>> *rm,
> uint32_t enc_id,
>>  		lm[lm_count] = iter_i.blk;
>> 
>>  		if (!_dpu_rm_check_lm_and_get_connected_blks(
>> -				rm, enc_id, reqs, lm[lm_count],
>> +				rm, reqs, lm[lm_count],
>>  				&pp[lm_count], NULL))
>>  			continue;
>> 
>>  		++lm_count;
>> 
>>  		/* Valid primary mixer found, find matching peers */
>> -		_dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
>> +		_dpu_rm_init_hw_iter(&iter_j, DPU_HW_BLK_LM);
>> 
>>  		while (lm_count != reqs->topology.num_lm &&
>>  				_dpu_rm_get_hw_locked(rm, &iter_j)) {
>> @@ -409,7 +384,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
> uint32_t enc_id,
>>  				continue;
>> 
>>  			if (!_dpu_rm_check_lm_and_get_connected_blks(
>> -					rm, enc_id, reqs, iter_j.blk,
>> +					rm, reqs, iter_j.blk,
>>  					&pp[lm_count], iter_i.blk))
>>  				continue;
>> 
>> @@ -427,13 +402,13 @@ static int _dpu_rm_reserve_lms(struct dpu_rm 
>> *rm,
> uint32_t enc_id,
>>  		if (!lm[i])
>>  			break;
>> 
>> -		lm[i]->enc_id = enc_id;
>> -		pp[i]->enc_id = enc_id;
>> +		lm[i]->in_use = true;
>> +		pp[i]->in_use = true;
>> 
>>  		dpu_cstate->mixers[i].hw_lm = to_dpu_hw_mixer(lm[i]->hw);
>>  		dpu_cstate->mixers[i].hw_pp =
> to_dpu_hw_pingpong(pp[i]->hw);
>> 
>> -		trace_dpu_rm_reserve_lms(lm[i]->hw->id, DPU_HW_BLK_LM,
> enc_id,
>> +		trace_dpu_rm_reserve_lms(lm[i]->hw->id, DPU_HW_BLK_LM,
>>  					 pp[i]->hw->id);
>>  	}
>> 
>> @@ -444,7 +419,6 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
> uint32_t enc_id,
>> 
>>  static int _dpu_rm_reserve_ctls(
>>  		struct dpu_rm *rm,
>> -		uint32_t enc_id,
>>  		struct dpu_crtc_state *dpu_cstate,
>>  		const struct msm_display_topology *top)
>>  {
>> @@ -460,15 +434,12 @@ static int _dpu_rm_reserve_ctls(
>> 
>>  	needs_split_display = _dpu_rm_needs_split_display(top);
>> 
>> -	_dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
>> +	_dpu_rm_init_hw_iter(&iter, DPU_HW_BLK_CTL);
>>  	while (_dpu_rm_get_hw_locked(rm, &iter)) {
>>  		const struct dpu_hw_ctl *ctl =
> to_dpu_hw_ctl(iter.blk->hw);
>>  		unsigned long features = ctl->caps->features;
>>  		bool has_split_display;
>> 
>> -		if (RESERVED_BY_OTHER(iter.blk, enc_id))
>> -			continue;
>> -
>>  		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
>> 
>>  		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->hw->id,
> features);
>> @@ -487,11 +458,10 @@ static int _dpu_rm_reserve_ctls(
>>  		return -ENAVAIL;
>> 
>>  	for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
>> -		ctls[i]->enc_id = enc_id;
>> +		ctls[i]->in_use = true;
>>  		dpu_cstate->hw_ctls[i] = to_dpu_hw_ctl(ctls[i]->hw);
>> 
>> -		trace_dpu_rm_reserve_ctls(ctls[i]->hw->id, DPU_HW_BLK_CTL,
>> -					  enc_id);
>> +		trace_dpu_rm_reserve_ctls(ctls[i]->hw->id,
> DPU_HW_BLK_CTL);
>>  	}
>> 
>>  	dpu_cstate->num_ctls = num_ctls;
>> @@ -501,26 +471,19 @@ static int _dpu_rm_reserve_ctls(
>> 
>>  static struct dpu_rm_hw_blk *_dpu_rm_reserve_intf(
>>  		struct dpu_rm *rm,
>> -		uint32_t enc_id,
>>  		uint32_t id,
>>  		enum dpu_hw_blk_type type)
>>  {
>>  	struct dpu_rm_hw_iter iter;
>> 
>>  	/* Find the block entry in the rm, and note the reservation */
>> -	_dpu_rm_init_hw_iter(&iter, 0, type);
>> +	_dpu_rm_init_hw_iter(&iter, type);
>>  	while (_dpu_rm_get_hw_locked(rm, &iter)) {
>>  		if (iter.blk->hw->id != id)
>>  			continue;
>> 
>> -		if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
>> -			DPU_ERROR("type %d id %d already reserved\n",
> type, id);
>> -			return NULL;
>> -		}
>> +		trace_dpu_rm_reserve_intf(iter.blk->hw->id,
> DPU_HW_BLK_INTF);
>> 
>> -		iter.blk->enc_id = enc_id;
>> -		trace_dpu_rm_reserve_intf(iter.blk->hw->id,
> DPU_HW_BLK_INTF,
>> -					  enc_id);
>>  		break;
>>  	}
>> 
>> @@ -535,7 +498,6 @@ static struct dpu_rm_hw_blk *_dpu_rm_reserve_intf(
>> 
>>  static int _dpu_rm_reserve_intf_related_hw(
>>  		struct dpu_rm *rm,
>> -		uint32_t enc_id,
>>  		struct dpu_crtc_state *dpu_cstate,
>>  		struct dpu_encoder_hw_resources *hw_res)
>>  {
>> @@ -546,11 +508,12 @@ static int _dpu_rm_reserve_intf_related_hw(
>>  		if (hw_res->intfs[i] == INTF_MODE_NONE)
>>  			continue;
>> 
>> -		blk = _dpu_rm_reserve_intf(rm, enc_id, i + INTF_0,
>> +		blk = _dpu_rm_reserve_intf(rm, i + INTF_0,
>>  				DPU_HW_BLK_INTF);
>>  		if (!blk)
>>  			return -ENAVAIL;
>> 
>> +		blk->in_use = true;
>>  		dpu_cstate->hw_intfs[num_intfs++] =
> to_dpu_hw_intf(blk->hw);
>>  	}
>> 
>> @@ -561,27 +524,24 @@ static int _dpu_rm_reserve_intf_related_hw(
>> 
>>  static int _dpu_rm_make_reservation(
>>  		struct dpu_rm *rm,
>> -		struct drm_encoder *enc,
>>  		struct dpu_crtc_state *dpu_cstate,
>>  		struct dpu_rm_requirements *reqs)
>>  {
>>  	int ret;
>> 
>> -	ret = _dpu_rm_reserve_lms(rm, enc->base.id, dpu_cstate, reqs);
>> +	ret = _dpu_rm_reserve_lms(rm, dpu_cstate, reqs);
>>  	if (ret) {
>>  		DPU_ERROR("unable to find appropriate mixers\n");
>>  		return ret;
>>  	}
>> 
>> -	ret = _dpu_rm_reserve_ctls(rm, enc->base.id, dpu_cstate,
>> -				   &reqs->topology);
>> +	ret = _dpu_rm_reserve_ctls(rm, dpu_cstate, &reqs->topology);
>>  	if (ret) {
>>  		DPU_ERROR("unable to find appropriate CTL\n");
>>  		return ret;
>>  	}
>> 
>> -	ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id,
> dpu_cstate,
>> -					      &reqs->hw_res);
>> +	ret = _dpu_rm_reserve_intf_related_hw(rm, dpu_cstate,
> &reqs->hw_res);
>>  	if (ret)
>>  		return ret;
>> 
>> @@ -612,7 +572,7 @@ static int _dpu_rm_release_hw(struct dpu_rm *rm,
> enum dpu_hw_blk_type type,
>> 
>>  	list_for_each_entry(blk, &rm->hw_blks[type], list) {
>>  		if (blk->hw->id == id) {
>> -			blk->enc_id = 0;
>> +			blk->in_use = false;
>>  			return 0;
>>  		}
>>  	}
>> @@ -699,7 +659,7 @@ int dpu_rm_reserve(
>>  		goto end;
>>  	}
>> 
>> -	ret = _dpu_rm_make_reservation(rm, enc, dpu_cstate, &reqs);
>> +	ret = _dpu_rm_make_reservation(rm, dpu_cstate, &reqs);
>>  	if (ret) {
>>  		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
>>  		_dpu_rm_release_reservation(rm, dpu_cstate);
>> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
> b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
>> index 636b31b..3061978 100644
>> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
>> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
>> @@ -869,48 +869,42 @@
>>  );
>> 
>>  DECLARE_EVENT_CLASS(dpu_rm_iter_template,
>> -	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
>> -	TP_ARGS(id, type, enc_id),
>> +	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type),
>> +	TP_ARGS(id, type),
>>  	TP_STRUCT__entry(
>>  		__field(	uint32_t,		id	)
>>  		__field(	enum dpu_hw_blk_type,	type	)
>> -		__field(	uint32_t,		enc_id	)
>>  	),
>>  	TP_fast_assign(
>>  		__entry->id = id;
>>  		__entry->type = type;
>> -		__entry->enc_id = enc_id;
>>  	),
>> -	TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
>> -		  __entry->enc_id)
>> +	TP_printk("id:%d type:%d ", __entry->id, __entry->type)
>>  );
>>  DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
>> -	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
>> -	TP_ARGS(id, type, enc_id)
>> +	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type),
>> +	TP_ARGS(id, type)
>>  );
>>  DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
>> -	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
>> -	TP_ARGS(id, type, enc_id)
>> +	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type),
>> +	TP_ARGS(id, type)
>>  );
>> 
>>  TRACE_EVENT(dpu_rm_reserve_lms,
>> -	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
>> -		 uint32_t pp_id),
>> -	TP_ARGS(id, type, enc_id, pp_id),
>> +	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t pp_id),
>> +	TP_ARGS(id, type, pp_id),
>>  	TP_STRUCT__entry(
>>  		__field(	uint32_t,		id	)
>>  		__field(	enum dpu_hw_blk_type,	type	)
>> -		__field(	uint32_t,		enc_id	)
>>  		__field(	uint32_t,		pp_id	)
>>  	),
>>  	TP_fast_assign(
>>  		__entry->id = id;
>>  		__entry->type = type;
>> -		__entry->enc_id = enc_id;
>>  		__entry->pp_id = pp_id;
>>  	),
>> -	TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
>> -		  __entry->type, __entry->enc_id, __entry->pp_id)
>> +	TP_printk("id:%d type:%d pp_id:%u", __entry->id,
>> +		  __entry->type, __entry->pp_id)
>>  );
>> 
>>  TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
>> --
>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> Forum,
>> a Linux Foundation Collaborative Project
>> 

-- 
Jeykumar S


More information about the Freedreno mailing list