[Freedreno] [PATCH v2 5/5] drm/msm: subclass work object for vblank events

Jeykumar Sankaran jsanka at codeaurora.org
Fri Dec 7 02:32:09 UTC 2018


On 2018-12-06 10:56, Jeykumar Sankaran wrote:
> On 2018-11-07 07:55, Sean Paul wrote:
>> On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
>>> msm maintains a separate structure to define vblank
>>> work definitions and a list to track events submitted
>>> to the workqueue. We can avoid this redundant list
>>> and its protection mechanism, if we subclass the
>>> work object to encapsulate vblank event parameters.
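To make the above concrete: each request carries its own parameters in the
subclassed work item, and the worker recovers them with container_of(), so
there is no shared list or lock left to maintain. A minimal sketch of the
pattern (mirroring the diff further down):

	struct msm_vblank_work {
		struct work_struct work;	/* embedded work item */
		int crtc_id;
		bool enable;
		struct msm_drm_private *priv;
	};

	static void vblank_ctrl_worker(struct work_struct *work)
	{
		/* recover the containing request from the embedded work_struct */
		struct msm_vblank_work *vbl_work =
				container_of(work, struct msm_vblank_work, work);

		/* ... act on vbl_work->crtc_id / vbl_work->enable ... */

		kfree(vbl_work);	/* each request frees itself */
	}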
>>> 
>>> changes in v2:
>>> 	- subclass optimization on system wq (Sean Paul)
>> 
>> I wouldn't do it like this, tbh. One problem is that you've lost your
>> flush() on unbind, so there's no way to know if you have workers in the
>> wild waiting to enable/disable vblank.
>> 
> I believe I didn't respond to this question of yours. This patch removes
> the flush on the work item, but calling flush_workqueue() before destroying
> the workqueue will make sure all queued work items run to completion.
> 
> To address the comment, I will fix the patch to move the workqueue
> flush/destroy before drm_irq_uninstall().
> 
The above is possible if we queue the vblank requests on priv->wq. Posted v4
with these fixes.
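
Roughly, the ordering in v4 looks like this (a sketch using the names from
the patch below; the exact v4 code may differ):

	/* queue vblank requests on the driver workqueue, not the system wq */
	queue_work(priv->wq, &vbl_work->work);

	/* in msm_drm_uninit(): drain and destroy the wq before the irq goes
	 * away, so a late worker cannot touch an irq that uninstall has
	 * already disabled */
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	drm_irq_uninstall(ddev);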

Thanks,
Jeykumar S.
> 
>> Another issue is that AFAICT, we don't need a queue of enables/disables,
>> but rather just the last requested state (ie: should we be on or off). So
>> things don't need to be this complicated (and we're possibly thrashing
>> vblank on/off for no reason).
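
For reference, one way to track only the last requested state per crtc, as
Sean describes, could look roughly like this (hypothetical names, not part
of this patch):

	struct msm_vblank_state {
		struct work_struct work;
		atomic_t enable;	/* last requested state: 1 = on, 0 = off */
		int crtc_id;
		struct msm_drm_private *priv;
	};

	static void vblank_state_worker(struct work_struct *work)
	{
		struct msm_vblank_state *st =
				container_of(work, struct msm_vblank_state, work);
		struct msm_kms *kms = st->priv->kms;

		/* apply only the most recent request */
		if (atomic_read(&st->enable))
			kms->funcs->enable_vblank(kms, st->priv->crtcs[st->crtc_id]);
		else
			kms->funcs->disable_vblank(kms, st->priv->crtcs[st->crtc_id]);
	}

	/* request path: record the state and kick the single per-crtc work
	 * item; schedule_work() is a no-op if it is already queued */
	static void vblank_ctrl_request(struct msm_vblank_state *st, bool enable)
	{
		atomic_set(&st->enable, enable ? 1 : 0);
		schedule_work(&st->work);
	}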
>> 
>> I'm still of the mind that you should just make this synchronous and be
>> done with the threads (especially since we're still uncovering/introducing
>> races!).
>> 
>> Sean
>> 
>>> 
>>> Signed-off-by: Jeykumar Sankaran <jsanka at codeaurora.org>
>>> ---
>>>  drivers/gpu/drm/msm/msm_drv.c | 67 +++++++++++++------------------------------
>>>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>>>  2 files changed, 20 insertions(+), 54 deletions(-)
>>> 
>>> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>>> index 6d6c73b..8da5be2 100644
>>> --- a/drivers/gpu/drm/msm/msm_drv.c
>>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>>> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>>>  	return val;
>>>  }
>>> 
>>> -struct vblank_event {
>>> -	struct list_head node;
>>> +struct msm_vblank_work {
>>> +	struct work_struct work;
>>>  	int crtc_id;
>>>  	bool enable;
>>> +	struct msm_drm_private *priv;
>>>  };
>>> 
>>>  static void vblank_ctrl_worker(struct work_struct *work)
>>>  {
>>> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>>> -						struct msm_vblank_ctrl, work);
>>> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>>> -					struct msm_drm_private, vblank_ctrl);
>>> +	struct msm_vblank_work *vbl_work = container_of(work,
>>> +						struct msm_vblank_work, work);
>>> +	struct msm_drm_private *priv = vbl_work->priv;
>>>  	struct msm_kms *kms = priv->kms;
>>> -	struct vblank_event *vbl_ev, *tmp;
>>> -	unsigned long flags;
>>> -
>>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>>> -		list_del(&vbl_ev->node);
>>> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>>> -
>>> -		if (vbl_ev->enable)
>>> -			kms->funcs->enable_vblank(kms,
>>> -						priv->crtcs[vbl_ev->crtc_id]);
>>> -		else
>>> -			kms->funcs->disable_vblank(kms,
>>> -						priv->crtcs[vbl_ev->crtc_id]);
>>> 
>>> -		kfree(vbl_ev);
>>> -
>>> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>>> -	}
>>> +	if (vbl_work->enable)
>>> +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
>>> +	else
>>> +		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
>>> 
>>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>>> +	kfree(vbl_work);
>>>  }
>>> 
>>>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>>>  					int crtc_id, bool enable)
>>>  {
>>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>>> -	struct vblank_event *vbl_ev;
>>> -	unsigned long flags;
>>> +	struct msm_vblank_work *vbl_work;
>>> 
>>> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>>> -	if (!vbl_ev)
>>> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>>> +	if (!vbl_work)
>>>  		return -ENOMEM;
>>> 
>>> -	vbl_ev->crtc_id = crtc_id;
>>> -	vbl_ev->enable = enable;
>>> +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>>> 
>>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>>> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>>> +	vbl_work->crtc_id = crtc_id;
>>> +	vbl_work->enable = enable;
>>> +	vbl_work->priv = priv;
>>> 
>>> -	schedule_work(&vbl_ctrl->work);
>>> +	schedule_work(&vbl_work->work);
>>> 
>>>  	return 0;
>>>  }
>>> @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
>>>  	struct msm_drm_private *priv = ddev->dev_private;
>>>  	struct msm_kms *kms = priv->kms;
>>>  	struct msm_mdss *mdss = priv->mdss;
>>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>>> -	struct vblank_event *vbl_ev, *tmp;
>>>  	int i;
>>> 
>>>  	/* We must cancel and cleanup any pending vblank enable/disable
>>>  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>>>  	 * irq after uninstall has disabled it.
>>>  	 */
>>> +
>>>  	msm_gem_shrinker_cleanup(ddev);
>>> 
>>>  	drm_kms_helper_poll_fini(ddev);
>>> @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
>>>  #endif
>>>  	drm_mode_config_cleanup(ddev);
>>> 
>>> -	flush_work(&vbl_ctrl->work);
>>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>>> -		list_del(&vbl_ev->node);
>>> -		kfree(vbl_ev);
>>> -	}
>>> -
>>>  	/* clean up event worker threads */
>>>  	for (i = 0; i < priv->num_crtcs; i++) {
>>>  		if (priv->event_thread[i].thread) {
>>> @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>>>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>>> 
>>>  	INIT_LIST_HEAD(&priv->inactive_list);
>>> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>>> -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>>> -	spin_lock_init(&priv->vblank_ctrl.lock);
>>> 
>>>  	drm_mode_config_init(ddev);
>>> 
>>> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
>>> index 05d33a7..d4cbde2 100644
>>> --- a/drivers/gpu/drm/msm/msm_drv.h
>>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>>> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>>>  	PLANE_PROP_MAX_NUM
>>>  };
>>> 
>>> -struct msm_vblank_ctrl {
>>> -	struct work_struct work;
>>> -	struct list_head event_list;
>>> -	spinlock_t lock;
>>> -};
>>> -
>>>  #define MSM_GPU_MAX_RINGS 4
>>>  #define MAX_H_TILES_PER_DISPLAY 2
>>> 
>>> @@ -225,7 +219,6 @@ struct msm_drm_private {
>>>  	struct notifier_block vmap_notifier;
>>>  	struct shrinker shrinker;
>>> 
>>> -	struct msm_vblank_ctrl vblank_ctrl;
>>>  	struct drm_atomic_state *pm_state;
>>>  };
>>> 
>>> --
>>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
>>> a Linux Foundation Collaborative Project
>>> 
>>> _______________________________________________
>>> Freedreno mailing list
>>> Freedreno at lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Jeykumar S

