[Intel-gfx] [RFC 13/15] drm/i915: Introduce intel_schedule_vblank_job()

Daniel Vetter daniel at ffwll.ch
Thu May 21 08:52:59 PDT 2015


On Thu, May 21, 2015 at 04:20:04PM +0300, Ville Syrjälä wrote:
> On Wed, May 20, 2015 at 07:12:25PM -0700, Matt Roper wrote:
> > Various places in the driver need the ability to schedule actions to run
> > on a future vblank (updating watermarks, unpinning buffers, etc.).  The
> > long term goal is to add such a mechanism in the DRM core that can be
> > shared by all drivers.  Paulo Zanoni has already written some
> > preliminary patches to support this, but his work will probably take
> > some time to polish and get merged since it needs to untangle the DRM
> > core's vblank infrastructure.  This patch is partially based on Paulo's
> > work, but takes a simpler approach and just adds an i915-specific
> > mechanism that can be used in the interim since we have an immediate
> > need for watermark updates.
> > 
> > Inspired-by: Paulo Zanoni <przanoni at gmail.com>
> > Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
> 
> I like what I had better: no needless kmallocs (IMO a must for any
> solution), no needless workqueues, and it uses the hw vblank counter
> with absolute values, so it isn't racy.

Yeah, the kmalloc in Paulo's patches was one of the things I didn't like.
We need to do the same as with timers/work items and embed them into
something we have already allocated.
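Purely for illustration, and not taken from either patch set: a minimal
sketch of what an embedded, allocation-free work item could look like,
building on the existing drm_crtc_vblank_get/put and drm_vblank_count
helpers and taking the absolute target count under dev->event_lock.  All
of the names below (intel_vblank_work, intel_crtc_arm_vblank_work,
intel_crtc_run_vblank_work) are made up for the sketch.

struct intel_vblank_work {
	u32 target_seq;		/* absolute vblank count to fire at */
	bool armed;
	void (*func)(struct intel_crtc *crtc);
};

/*
 * One instance per use case would be embedded in struct intel_crtc
 * (e.g. "struct intel_vblank_work wm_work;") and initialised once in
 * intel_crtc_init(), so arming it never allocates.
 */

/*
 * Arm the embedded work item for @vblanks vblanks from now.  The
 * absolute target is computed with dev->event_lock held so it cannot
 * race against the vblank interrupt handler.
 */
static int intel_crtc_arm_vblank_work(struct intel_crtc *crtc,
				      struct intel_vblank_work *work,
				      u32 vblanks)
{
	struct drm_device *dev = crtc->base.dev;
	unsigned long flags;
	int ret;

	ret = drm_crtc_vblank_get(&crtc->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	work->target_seq = drm_vblank_count(dev, crtc->pipe) + vblanks;
	work->armed = true;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return 0;
}

/*
 * Called from the vblank interrupt path with dev->event_lock held (the
 * same lock the arm side takes); runs the work once the hw counter has
 * reached the absolute target, wraparound-safe.
 */
static void intel_crtc_run_vblank_work(struct intel_crtc *crtc,
				       struct intel_vblank_work *work,
				       u32 seq)
{
	if (!work->armed || (int)(seq - work->target_seq) < 0)
		return;

	work->armed = false;
	drm_crtc_vblank_put(&crtc->base);
	work->func(crtc);
}

Since the struct is embedded, re-arming it while it is still pending
would have to be either forbidden or handled explicitly; the sketch
above doesn't deal with that.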
-Daniel

> 
> > ---
> >  drivers/gpu/drm/i915/i915_irq.c      | 117 +++++++++++++++++++++++++++++++++++
> >  drivers/gpu/drm/i915/intel_display.c |   8 +++
> >  drivers/gpu/drm/i915/intel_drv.h     |  27 ++++++++
> >  3 files changed, 152 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> > index 557af88..fd5a795 100644
> > --- a/drivers/gpu/drm/i915/i915_irq.c
> > +++ b/drivers/gpu/drm/i915/i915_irq.c
> > @@ -1649,8 +1649,37 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
> >  	}
> >  }
> >  
> > +static void process_vblank_jobs(struct drm_device *dev, enum pipe pipe)
> > +{
> > +	struct drm_i915_private *dev_priv = dev->dev_private;
> > +	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
> > +	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> > +	struct i915_vblank_job *job, *tmp;
> > +	u32 seq;
> > +
> > +	seq = drm_vblank_count(dev, pipe);
> > +	list_for_each_entry_safe(job, tmp, &intel_crtc->vblank_jobs, link) {
> > +		if (seq - job->seq > (1<<23))
> > +			continue;
> > +
> > +		list_del(&job->link);
> > +		drm_vblank_put(dev, pipe);
> > +
> > +		if (job->wq) {
> > +			job->fired_seq = seq;
> > +			queue_work(job->wq, &job->work);
> > +		} else {
> > +			job->callback(intel_crtc, job->callback_data,
> > +				      false, seq);
> > +			kfree(job);
> > +		}
> > +	}
> > +}
> > +
> >  static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
> >  {
> > +	process_vblank_jobs(dev, pipe);
> > +
> >  	if (!drm_handle_vblank(dev, pipe))
> >  		return false;
> >  
> > @@ -4511,3 +4540,91 @@ void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
> >  	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
> >  	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
> >  }
> > +
> > +static void vblank_job_handler(struct work_struct *work)
> > +{
> > +	struct i915_vblank_job *job =
> > +		container_of(work, struct i915_vblank_job, work);
> > +
> > +	job->callback(job->crtc, job->callback_data,
> > +		      job->seq != job->fired_seq, job->fired_seq);
> > +	kfree(job);
> > +}
> > +
> > +/**
> > + * intel_schedule_vblank_job - schedule work to happen on specified vblank
> > + * @crtc: CRTC whose vblank should trigger the work
> > + * @callback: Code to run on vblank
> > + * @callback_data: Data to pass to callback function
> > + * @wq: Workqueue to run job on (or NULL to run from interrupt context)
> > + * @seq: vblank count relative to current to schedule for
> > + *
> > + * Schedule code that should be run after the specified vblank fires.  The code
> > + * can either run directly in the interrupt context or on a specified
> > + * workqueue.
> > + */
> > +int intel_schedule_vblank_job(struct intel_crtc *crtc,
> > +			      i915_vblank_callback_t callback,
> > +			      void *callback_data,
> > +			      struct workqueue_struct *wq,
> > +			      u32 seq)
> > +{
> > +	struct drm_device *dev = crtc->base.dev;
> > +	struct i915_vblank_job *job;
> > +	unsigned long irqflags;
> > +	int ret;
> > +
> > +	job = kzalloc(sizeof(*job), GFP_ATOMIC);
> > +	if (!job)
> > +		return -ENOMEM;
> > +
> > +	job->wq = wq;
> > +	job->crtc = crtc;
> > +	job->callback = callback;
> > +	job->callback_data = callback_data;
> > +	if (job->wq)
> > +		INIT_WORK(&job->work, vblank_job_handler);
> > +
> > +	ret = drm_crtc_vblank_get(&crtc->base);
> > +	if (ret) {
> > +		DRM_DEBUG_KMS("Failed to enable interrupts, ret=%d\n", ret);
> > +		kfree(job);
> > +		return ret;
> > +	}
> > +
> > +	spin_lock_irqsave(&dev->event_lock, irqflags);
> > +	job->seq = seq + drm_vblank_count(dev, crtc->pipe);
> > +	list_add_tail(&job->link, &crtc->vblank_jobs);
> > +	spin_unlock_irqrestore(&dev->event_lock, irqflags);
> > +
> > +	return 0;
> > +}
> > +
> > +/**
> > + * trigger_all_vblank_jobs - immediately trigger vblank jobs for a crtc
> > + * @crtc: CRTC to trigger all outstanding jobs for
> > + *
> > + * Immediately triggers all of the jobs awaiting future vblanks on a CRTC.
> > + * This will be called when a CRTC is disabled (because there may never be
> > + * another vblank event).  The job callbacks will receive 'true' for the
> > + * early parameter, as well as the current vblank count.
> > + */
> > +void trigger_all_vblank_jobs(struct intel_crtc *crtc)
> > +{
> > +	struct i915_vblank_job *job, *tmp;
> > +	u32 seq;
> > +
> > +	seq = drm_vblank_count(crtc->base.dev, crtc->pipe);
> > +	list_for_each_entry_safe(job, tmp, &crtc->vblank_jobs, link) {
> > +		list_del(&job->link);
> > +		drm_vblank_put(crtc->base.dev, crtc->pipe);
> > +
> > +		if (job->wq) {
> > +			job->fired_seq = seq;
> > +			queue_work(job->wq, &job->work);
> > +		} else {
> > +			job->callback(crtc, job->callback_data, true, seq);
> > +			kfree(job);
> > +		}
> > +	}
> > +}
> > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> > index 76affba..9b56d07 100644
> > --- a/drivers/gpu/drm/i915/intel_display.c
> > +++ b/drivers/gpu/drm/i915/intel_display.c
> > @@ -5099,6 +5099,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
> >  	if (!intel_crtc->active)
> >  		return;
> >  
> > +	trigger_all_vblank_jobs(intel_crtc);
> > +
> >  	for_each_encoder_on_crtc(dev, crtc, encoder)
> >  		encoder->disable(encoder);
> >  
> > @@ -5161,6 +5163,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
> >  	if (!intel_crtc->active)
> >  		return;
> >  
> > +	trigger_all_vblank_jobs(intel_crtc);
> > +
> >  	for_each_encoder_on_crtc(dev, crtc, encoder) {
> >  		intel_opregion_notify_encoder(encoder, false);
> >  		encoder->disable(encoder);
> > @@ -6004,6 +6008,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
> >  	if (!intel_crtc->active)
> >  		return;
> >  
> > +	trigger_all_vblank_jobs(intel_crtc);
> > +
> >  	/*
> >  	 * On gen2 planes are double buffered but the pipe isn't, so we must
> >  	 * wait for planes to fully turn off before disabling the pipe.
> > @@ -13649,6 +13655,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
> >  
> >  	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
> >  
> > +	INIT_LIST_HEAD(&intel_crtc->vblank_jobs);
> > +
> >  	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
> >  	return;
> >  
> > diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> > index 627741a..630e7c1 100644
> > --- a/drivers/gpu/drm/i915/intel_drv.h
> > +++ b/drivers/gpu/drm/i915/intel_drv.h
> > @@ -504,6 +504,24 @@ struct intel_crtc_atomic_commit {
> >  	unsigned update_sprite_watermarks;
> >  };
> >  
> > +typedef void (*i915_vblank_callback_t)(struct intel_crtc *crtc,
> > +				       void *data,
> > +				       bool early,
> > +				       u32 fired_seq);
> > +
> > +/* Task to run (or schedule on a workqueue) on a specific vblank */
> > +struct i915_vblank_job {
> > +	u32 seq;
> > +	u32 fired_seq;                     /* early if crtc gets disabled */
> > +	struct intel_crtc *crtc;
> > +	struct workqueue_struct *wq;       /* NULL = run from interrupt */
> > +	i915_vblank_callback_t callback;
> > +	void *callback_data;
> > +
> > +	struct list_head link;
> > +	struct work_struct work;
> > +};
> > +
> >  struct intel_crtc {
> >  	struct drm_crtc base;
> >  	enum pipe pipe;
> > @@ -550,6 +568,9 @@ struct intel_crtc {
> >  
> >  	/* scalers available on this crtc */
> >  	int num_scalers;
> > +
> > +	/* Jobs to run/schedule on vblank */
> > +	struct list_head vblank_jobs;
> >  };
> >  
> >  struct intel_plane_wm_parameters {
> > @@ -913,6 +934,12 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
> >  int intel_get_crtc_scanline(struct intel_crtc *crtc);
> >  void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
> >  				     unsigned int pipe_mask);
> > +int intel_schedule_vblank_job(struct intel_crtc *crtc,
> > +			      i915_vblank_callback_t callback,
> > +			      void *callback_data,
> > +			      struct workqueue_struct *wq,
> > +			      u32 seq);
> > +void trigger_all_vblank_jobs(struct intel_crtc *crtc);
> >  
> >  /* intel_crt.c */
> >  void intel_crt_init(struct drm_device *dev);
> > -- 
> > 1.8.5.1
> > 
> 
> -- 
> Ville Syrjälä
> Intel OTC

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

