[Intel-gfx] [PATCH 06/18] drm/i915/guc: Drop guc->interrupts.enabled

John Harrison john.c.harrison at intel.com
Thu May 27 17:17:20 UTC 2021


On 5/25/2021 23:42, Matthew Brost wrote:
> Drop the variable guc->interrupts.enabled, as it just leads to bugs
> creeping into the code.
>
> e.g. A full GPU reset disables the GuC interrupts but forgets to clear
> guc->interrupts.enabled; with the flag still true, the interrupts never
> get re-enabled and now we are broken.
>
> It is harmless to enable the interrupts while they are already enabled,
> so let's just delete this variable to avoid bugs like this going
> forward.
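If I am reading the failure scenario right, it boils down to something
like the sketch below (heavily simplified, with made-up helper names
rather than the real i915 paths):

	/* Simplified sketch of the stale-flag problem, not actual i915 code */
	static void enable_guc_interrupts(struct intel_guc *guc)
	{
		if (!guc->interrupts.enabled) {	/* a stale 'true' skips all of this */
			/* ... write the HW interrupt enable/mask registers ... */
			guc->interrupts.enabled = true;
		}
	}

	static void full_gpu_reset(struct intel_guc *guc)
	{
		/*
		 * The reset masks the GuC interrupts at the hardware level but
		 * never clears guc->interrupts.enabled, so the next call to
		 * enable_guc_interrupts() is silently a no-op and GuC interrupts
		 * stay off.
		 */
	}
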
Is it worth leaving the enabled flag in place but only using it to trip 
a WARN to catch such cases in a less catastrophic manner? Or are there 
valid reasons for calling enable when already enabled?
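To be clear, I was only thinking of something along these lines in the
enable path (a rough sketch against gen11_enable_guc_interrupts(), with
the disable/reset paths still clearing the flag):

	spin_lock_irq(&gt->irq_lock);
	/* keep the flag purely as a debug canary, no functional effect */
	WARN_ON_ONCE(guc->interrupts.enabled);
	guc->interrupts.enabled = true;
	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
	spin_unlock_irq(&gt->irq_lock);

That would make a stray double-enable visible in dmesg instead of being
silently swallowed, but it is not a blocker.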

Either way, it seems like a plausible change and CI is happy with it, so:
Reviewed-by: John Harrison <John.C.Harrison at Intel.com>

John.

> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>   drivers/gpu/drm/i915/gt/uc/intel_guc.c | 27 +++++++++-----------------
>   drivers/gpu/drm/i915/gt/uc/intel_guc.h |  1 -
>   2 files changed, 9 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
> index ab2c8fe8cdfa..18da9ed15728 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
> @@ -96,12 +96,9 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
>   	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
>   
>   	spin_lock_irq(&gt->irq_lock);
> -	if (!guc->interrupts.enabled) {
> -		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
> -			     gt->pm_guc_events);
> -		guc->interrupts.enabled = true;
> -		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
> -	}
> +	WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
> +		     gt->pm_guc_events);
> +	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
>   	spin_unlock_irq(&gt->irq_lock);
>   }
>   
> @@ -112,7 +109,6 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
>   	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
>   
>   	spin_lock_irq(&gt->irq_lock);
> -	guc->interrupts.enabled = false;
>   
>   	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
>   
> @@ -134,18 +130,14 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
>   static void gen11_enable_guc_interrupts(struct intel_guc *guc)
>   {
>   	struct intel_gt *gt = guc_to_gt(guc);
> +	u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
>   
>   	spin_lock_irq(&gt->irq_lock);
> -	if (!guc->interrupts.enabled) {
> -		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
> -
> -		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
> -		intel_uncore_write(gt->uncore,
> -				   GEN11_GUC_SG_INTR_ENABLE, events);
> -		intel_uncore_write(gt->uncore,
> -				   GEN11_GUC_SG_INTR_MASK, ~events);
> -		guc->interrupts.enabled = true;
> -	}
> +	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
> +	intel_uncore_write(gt->uncore,
> +			   GEN11_GUC_SG_INTR_ENABLE, events);
> +	intel_uncore_write(gt->uncore,
> +			   GEN11_GUC_SG_INTR_MASK, ~events);
>   	spin_unlock_irq(&gt->irq_lock);
>   }
>   
> @@ -154,7 +146,6 @@ static void gen11_disable_guc_interrupts(struct intel_guc *guc)
>   	struct intel_gt *gt = guc_to_gt(guc);
>   
>   	spin_lock_irq(&gt->irq_lock);
> -	guc->interrupts.enabled = false;
>   
>   	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
>   	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> index c20f3839de12..4abc59f6f3cd 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> @@ -33,7 +33,6 @@ struct intel_guc {
>   	unsigned int msg_enabled_mask;
>   
>   	struct {
> -		bool enabled;
>   		void (*reset)(struct intel_guc *guc);
>   		void (*enable)(struct intel_guc *guc);
>   		void (*disable)(struct intel_guc *guc);


