[PATCH 10/17] drm/msm/a6xx: Poll AHB fence status in GPU IRQ handler
Akhil P Oommen
akhilpo@oss.qualcomm.com
Sun Jul 20 12:16:11 UTC 2025
Even though the GX power domain is kept ON while a GPU interrupt is
pending, there is a small race window in which the GMU may move the AHB
fence to 'Drop' mode. Close this window by polling the AHB fence status
in the IRQ handler to ensure it is in 'Allow' mode before any GPU
registers are accessed.
Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
---
drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 3 +++
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 26 ++++++++++++++++++++++++++
2 files changed, 29 insertions(+)
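[Not part of the patch: a minimal sketch, for illustration only, of what
the new gmu_poll_timeout_atomic() wrapper added below expands to. It
assumes readl_poll_timeout_atomic() from <linux/iopoll.h>, which
busy-waits without sleeping and is therefore safe in hard IRQ context;
the register name, 1 us poll interval and 100 us timeout are taken from
the a6xx_gpu.c hunk, while the function name is hypothetical.]

	#include <linux/iopoll.h>

	static int ahb_fence_poll_sketch(struct a6xx_gmu *gmu)
	{
		u32 status;

		/*
		 * Re-read the AHB fence status register every 1 us until it
		 * reads 0 ('Allow' mode, per this patch), giving up after
		 * 100 us. Returns -ETIMEDOUT on timeout, with 'status'
		 * holding the last value read. GMU register offsets are in
		 * dwords, hence the '<< 2' byte-address scaling.
		 */
		return readl_poll_timeout_atomic(
			gmu->mmio + (REG_A6XX_GMU_AO_AHB_FENCE_CTRL << 2),
			status, !status, 1, 100);
	}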
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 034f1b4e5a3fb9cd601bfbe6d06d64e5ace3b6e7..62c98b198551f26b99bd6e094f8fa35e16ec550d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -164,6 +164,9 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
+#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
{
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index f000915a4c2698a85b45bd3c92e590f14999d10d..e331cbdb117df6cfa8ae0e4c44a5aa91ba93f8eb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1823,6 +1823,28 @@ static void set_keepalive_vote(struct msm_gpu *gpu, bool on)
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, on);
}
+static int irq_poll_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, status, !status, 1, 100)) {
+ u32 rbbm_unmasked = gmu_read(gmu, REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "irq fence poll timeout, fence_ctrl=0x%x, unmasked_status=0x%x\n",
+ status, rbbm_unmasked);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
@@ -1830,6 +1852,9 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
/* Set keepalive vote to avoid power collapse after RBBM_INT_0_STATUS is read */
set_keepalive_vote(gpu, true);
+ if (irq_poll_fence(gpu))
+ goto done;
+
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
@@ -1866,6 +1891,7 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
a6xx_preempt_irq(gpu);
+done:
set_keepalive_vote(gpu, false);
return IRQ_HANDLED;
--
2.50.1