Skip to content

Commit 04ca842

Browse files
akhilpo-qcom authored and Rob Clark committed
drm/msm/a6xx: Poll AHB fence status in GPU IRQ handler
Even though the GX power domain is kept ON when there is a pending GPU interrupt, there is a small window for a potential race with the GMU, where it may move the AHB fence to 'Drop' mode. Once the GMU sees the pending IRQ, it moves the fence state back to 'Allow' mode. Close this race window by polling the AHB fence to ensure that it is in 'Allow' mode. Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com> Patchwork: https://patchwork.freedesktop.org/patch/673377/ Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
1 parent f195421 commit 04ca842

2 files changed

Lines changed: 29 additions & 0 deletions

File tree

drivers/gpu/drm/msm/adreno/a6xx_gmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,9 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
167167
/*
 * Poll a GMU register (word-indexed @addr) until @cond is true or @timeout
 * (us) expires; may sleep between reads — not for atomic context.
 */
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
/*
 * Same contract, but built on readl_poll_timeout_atomic() so it is usable
 * from atomic context (e.g. the GPU IRQ handler's AHB fence poll).
 */
#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
170173

171174
static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
172175
{

drivers/gpu/drm/msm/adreno/a6xx_gpu.c

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1845,13 +1845,38 @@ static void a6xx_gpu_keepalive_vote(struct msm_gpu *gpu, bool on)
18451845
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, on);
18461846
}
18471847

1848+
/*
 * Wait (in IRQ context) for the GMU AHB fence to return to 'Allow' mode
 * before the interrupt handler touches GPU registers.  Even with the
 * keepalive vote held, the GMU may briefly have moved the fence to 'Drop'
 * mode; once it sees the pending IRQ it moves the fence back to 'Allow'.
 *
 * Returns 0 on success (or immediately on GMU-wrapper targets, which have
 * no AHB fence to poll), -ETIMEDOUT if the fence did not clear in time.
 */
static int irq_poll_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 status;

	/* Targets with only a GMU wrapper have no fence machinery */
	if (adreno_has_gmu_wrapper(adreno_gpu))
		return 0;

	/*
	 * Atomic (non-sleeping) poll, 1 us interval / 100 us timeout, until
	 * FENCE_CTRL reads back zero — presumably the 'Allow' state; confirm
	 * against the A6XX register definitions.
	 */
	if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, status, !status, 1, 100)) {
		/* Grab the unmasked RBBM IRQ status to aid debugging the timeout */
		u32 rbbm_unmasked = gmu_read(gmu, REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS);

		dev_err_ratelimited(&gpu->pdev->dev,
			"irq fence poll timeout, fence_ctrl=0x%x, unmasked_status=0x%x\n",
			status, rbbm_unmasked);
		return -ETIMEDOUT;
	}

	return 0;
}
1869+
18481870
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
18491871
{
18501872
struct msm_drm_private *priv = gpu->dev->dev_private;
18511873

18521874
/* Set keepalive vote to avoid power collapse after RBBM_INT_0_STATUS is read */
18531875
a6xx_gpu_keepalive_vote(gpu, true);
18541876

1877+
if (irq_poll_fence(gpu))
1878+
goto done;
1879+
18551880
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
18561881

18571882
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
@@ -1888,6 +1913,7 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
18881913
if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
18891914
a6xx_preempt_irq(gpu);
18901915

1916+
done:
18911917
a6xx_gpu_keepalive_vote(gpu, false);
18921918

18931919
return IRQ_HANDLED;

0 commit comments

Comments
 (0)