[PATCH v2 10/10] drm/xe/vf: Add VF specific interrupt handler
Matt Roper
matthew.d.roper at intel.com
Fri Dec 15 22:40:35 UTC 2023
On Thu, Dec 14, 2023 at 07:59:55PM +0100, Michal Wajdeczko wrote:
> There are small differences in how the VF driver handles the
> register based interrupts, as some registers are not accessible
> to the VF driver. Additionally, VFs must support Memory Based
> Interrupts. Add a VF specific interrupt handler to cover both.
>
> Signed-off-by: Michal Wajdeczko <michal.wajdeczko at intel.com>
> Cc: Matt Roper <matthew.d.roper at intel.com>
Reviewed-by: Matt Roper <matthew.d.roper at intel.com>
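
A side note for readers following along: the handler returned by
xe_irq_handler() is what ultimately gets registered with the kernel IRQ
core, so on a VF running with memory based interrupts every interrupt
vector is funneled through vf_mem_irq_handler() and dispatched per tile
from there.  A minimal, illustrative sketch of that registration step
(example_irq_install() is a hypothetical helper for this note, not the
driver's actual install path):

	static int example_irq_install(struct xe_device *xe, int irq)
	{
		/* On a VF with memirq support this is vf_mem_irq_handler */
		irq_handler_t handler = xe_irq_handler(xe);

		if (!handler)
			return -EINVAL;

		/*
		 * Hypothetical registration for illustration only; the
		 * point is just that whatever xe_irq_handler() selects is
		 * what request_irq() ends up wiring to the vector.
		 */
		return request_irq(irq, handler, IRQF_SHARED, "xe", xe);
	}
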
> ---
> drivers/gpu/drm/xe/xe_irq.c | 71 +++++++++++++++++++++++++++++++++++++
> 1 file changed, 71 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
> index d1f5ba4bb745..907c8ff0fa21 100644
> --- a/drivers/gpu/drm/xe/xe_irq.c
> +++ b/drivers/gpu/drm/xe/xe_irq.c
> @@ -17,7 +17,9 @@
> #include "xe_gt.h"
> #include "xe_guc.h"
> #include "xe_hw_engine.h"
> +#include "xe_memirq.h"
> #include "xe_mmio.h"
> +#include "xe_sriov.h"
>
> /*
> * Interrupt registers for a unit are always consecutive and ordered
> @@ -498,6 +500,9 @@ static void xelp_irq_reset(struct xe_tile *tile)
>
> gt_irq_reset(tile);
>
> + if (IS_SRIOV_VF(tile_to_xe(tile)))
> + return;
> +
> mask_and_disable(tile, PCU_IRQ_OFFSET);
> }
>
> @@ -508,6 +513,9 @@ static void dg1_irq_reset(struct xe_tile *tile)
>
> gt_irq_reset(tile);
>
> + if (IS_SRIOV_VF(tile_to_xe(tile)))
> + return;
> +
> mask_and_disable(tile, PCU_IRQ_OFFSET);
> }
>
> @@ -518,11 +526,34 @@ static void dg1_irq_reset_mstr(struct xe_tile *tile)
> xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
> }
>
> +static void vf_irq_reset(struct xe_device *xe)
> +{
> + struct xe_tile *tile;
> + unsigned int id;
> +
> + xe_assert(xe, IS_SRIOV_VF(xe));
> +
> + if (GRAPHICS_VERx100(xe) < 1210)
> + xelp_intr_disable(xe);
> + else
> + xe_assert(xe, xe_device_has_memirq(xe));
> +
> + for_each_tile(tile, xe, id) {
> + if (xe_device_has_memirq(xe))
> + xe_memirq_reset(&tile->sriov.vf.memirq);
> + else
> + gt_irq_reset(tile);
> + }
> +}
> +
> static void xe_irq_reset(struct xe_device *xe)
> {
> struct xe_tile *tile;
> u8 id;
>
> + if (IS_SRIOV_VF(xe))
> + return vf_irq_reset(xe);
> +
> for_each_tile(tile, xe, id) {
> if (GRAPHICS_VERx100(xe) >= 1210)
> dg1_irq_reset(tile);
> @@ -545,8 +576,26 @@ static void xe_irq_reset(struct xe_device *xe)
> }
> }
>
> +static void vf_irq_postinstall(struct xe_device *xe)
> +{
> + struct xe_tile *tile;
> + unsigned int id;
> +
> + for_each_tile(tile, xe, id)
> + if (xe_device_has_memirq(xe))
> + xe_memirq_postinstall(&tile->sriov.vf.memirq);
> +
> + if (GRAPHICS_VERx100(xe) < 1210)
> + xelp_intr_enable(xe, true);
> + else
> + xe_assert(xe, xe_device_has_memirq(xe));
> +}
> +
> static void xe_irq_postinstall(struct xe_device *xe)
> {
> + if (IS_SRIOV_VF(xe))
> + return vf_irq_postinstall(xe);
> +
> xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
>
> /*
> @@ -563,8 +612,30 @@ static void xe_irq_postinstall(struct xe_device *xe)
> xelp_intr_enable(xe, true);
> }
>
> +static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
> +{
> + struct xe_device *xe = arg;
> + struct xe_tile *tile;
> + unsigned int id;
> +
> + spin_lock(&xe->irq.lock);
> + if (!xe->irq.enabled) {
> + spin_unlock(&xe->irq.lock);
> + return IRQ_NONE;
> + }
> + spin_unlock(&xe->irq.lock);
> +
> + for_each_tile(tile, xe, id)
> + xe_memirq_handler(&tile->sriov.vf.memirq);
> +
> + return IRQ_HANDLED;
> +}
> +
> static irq_handler_t xe_irq_handler(struct xe_device *xe)
> {
> + if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
> + return vf_mem_irq_handler;
> +
> if (GRAPHICS_VERx100(xe) >= 1210)
> return dg1_irq_handler;
> else
> --
> 2.25.1
>
--
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation