[PATCH v3 08/15] drm/panfrost: Use a threaded IRQ for job interrupts

Boris Brezillon boris.brezillon at collabora.com
Fri Jun 25 14:37:53 UTC 2021


On Fri, 25 Jun 2021 09:47:59 -0400
Alyssa Rosenzweig <alyssa at collabora.com> wrote:

> A-b, but could you explain the context? Thanks

The rationale behind this change is the complexity added to the
interrupt handler in patch 15. That means we might spend more time in
interrupt context after that patch and block other things on the system
while we dequeue job irqs. Moving things to a thread also helps
performance when the GPU executes jobs faster than the CPU can queue
them: in that case we keep switching back-and-forth between
interrupt and non-interrupt context, which has a cost.

One drawback is increased latency when receiving job events and the
thread is idle, since you need to wake up the thread in that case.

> 
> On Fri, Jun 25, 2021 at 03:33:20PM +0200, Boris Brezillon wrote:
> > This should avoid switching to interrupt context when the GPU is under
> > heavy use.
> > 
> > v3:
> > * Don't take the job_lock in panfrost_job_handle_irq()
> > 
> > Signed-off-by: Boris Brezillon <boris.brezillon at collabora.com>
> > ---
> >  drivers/gpu/drm/panfrost/panfrost_job.c | 53 ++++++++++++++++++-------
> >  1 file changed, 38 insertions(+), 15 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> > index be8f68f63974..e0c479e67304 100644
> > --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> > +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> > @@ -470,19 +470,12 @@ static const struct drm_sched_backend_ops panfrost_sched_ops = {
> >  	.free_job = panfrost_job_free
> >  };
> >  
> > -static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
> > +static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
> >  {
> > -	struct panfrost_device *pfdev = data;
> > -	u32 status = job_read(pfdev, JOB_INT_STAT);
> >  	int j;
> >  
> >  	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);
> >  
> > -	if (!status)
> > -		return IRQ_NONE;
> > -
> > -	pm_runtime_mark_last_busy(pfdev->dev);
> > -
> >  	for (j = 0; status; j++) {
> >  		u32 mask = MK_JS_MASK(j);
> >  
> > @@ -519,7 +512,6 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
> >  		if (status & JOB_INT_MASK_DONE(j)) {
> >  			struct panfrost_job *job;
> >  
> > -			spin_lock(&pfdev->js->job_lock);
> >  			job = pfdev->jobs[j];
> >  			/* Only NULL if job timeout occurred */
> >  			if (job) {
> > @@ -531,21 +523,49 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
> >  				dma_fence_signal_locked(job->done_fence);
> >  				pm_runtime_put_autosuspend(pfdev->dev);
> >  			}
> > -			spin_unlock(&pfdev->js->job_lock);
> >  		}
> >  
> >  		status &= ~mask;
> >  	}
> > +}
> >  
> > +static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
> > +{
> > +	struct panfrost_device *pfdev = data;
> > +	u32 status = job_read(pfdev, JOB_INT_RAWSTAT);
> > +
> > +	while (status) {
> > +		pm_runtime_mark_last_busy(pfdev->dev);
> > +
> > +		spin_lock(&pfdev->js->job_lock);
> > +		panfrost_job_handle_irq(pfdev, status);
> > +		spin_unlock(&pfdev->js->job_lock);
> > +		status = job_read(pfdev, JOB_INT_RAWSTAT);
> > +	}
> > +
> > +	job_write(pfdev, JOB_INT_MASK,
> > +		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
> > +		  GENMASK(NUM_JOB_SLOTS - 1, 0));
> >  	return IRQ_HANDLED;
> >  }
> >  
> > +static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
> > +{
> > +	struct panfrost_device *pfdev = data;
> > +	u32 status = job_read(pfdev, JOB_INT_STAT);
> > +
> > +	if (!status)
> > +		return IRQ_NONE;
> > +
> > +	job_write(pfdev, JOB_INT_MASK, 0);
> > +	return IRQ_WAKE_THREAD;
> > +}
> > +
> >  static void panfrost_reset(struct work_struct *work)
> >  {
> >  	struct panfrost_device *pfdev = container_of(work,
> >  						     struct panfrost_device,
> >  						     reset.work);
> > -	unsigned long flags;
> >  	unsigned int i;
> >  	bool cookie;
> >  
> > @@ -575,7 +595,7 @@ static void panfrost_reset(struct work_struct *work)
> >  	/* All timers have been stopped, we can safely reset the pending state. */
> >  	atomic_set(&pfdev->reset.pending, 0);
> >  
> > -	spin_lock_irqsave(&pfdev->js->job_lock, flags);
> > +	spin_lock(&pfdev->js->job_lock);
> >  	for (i = 0; i < NUM_JOB_SLOTS; i++) {
> >  		if (pfdev->jobs[i]) {
> >  			pm_runtime_put_noidle(pfdev->dev);
> > @@ -583,7 +603,7 @@ static void panfrost_reset(struct work_struct *work)
> >  			pfdev->jobs[i] = NULL;
> >  		}
> >  	}
> > -	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
> > +	spin_unlock(&pfdev->js->job_lock);
> >  
> >  	panfrost_device_reset(pfdev);
> >  
> > @@ -610,8 +630,11 @@ int panfrost_job_init(struct panfrost_device *pfdev)
> >  	if (irq <= 0)
> >  		return -ENODEV;
> >  
> > -	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
> > -			       IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
> > +	ret = devm_request_threaded_irq(pfdev->dev, irq,
> > +					panfrost_job_irq_handler,
> > +					panfrost_job_irq_handler_thread,
> > +					IRQF_SHARED, KBUILD_MODNAME "-job",
> > +					pfdev);
> >  	if (ret) {
> >  		dev_err(pfdev->dev, "failed to request job irq");
> >  		return ret;
> > -- 
> > 2.31.1
> >   



More information about the dri-devel mailing list