[PATCH 6/6] drm/amdgpu: Implement ib_preempt interface for sdma
Rex Zhu
Rex.Zhu at amd.com
Wed Oct 31 12:25:36 UTC 2018
Implement the ib_preempt ring callback for SDMA v3.0 and v4.0 so the
SDMA engines can be preempted via this interface: the callback enables
the preempt cond_exec, writes 1 to the SDMAx_GFX_PREEMPT register of
the ring's instance and emits a fence with
AMDGPU_FENCE_FLAG_CLEAR_PREEMPT.
Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
---
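Note: below is a minimal sketch (not part of the patch) of how a caller
such as a preemption test could exercise the new hook. It assumes only
the ->ib_preempt member this series adds to struct amdgpu_ring_funcs;
the helper name amdgpu_ring_test_preempt() is hypothetical.

    #include "amdgpu.h"

    static int amdgpu_ring_test_preempt(struct amdgpu_ring *ring)
    {
            /* rings that do not implement the hook cannot be preempted */
            if (!ring->funcs->ib_preempt)
                    return -EINVAL;

            /* asserts SDMAx_GFX_PREEMPT and emits the clearing fence */
            ring->funcs->ib_preempt(ring);
            return 0;
    }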
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 14 ++++++++++++++
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 15 +++++++++++++++
2 files changed, 29 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index de280d4..388d3eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -545,6 +545,19 @@ static void sdma_v3_0_ring_patch_cond_exec(struct amdgpu_ring *ring, unsigned of
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
+static void sdma_v3_0_ring_ib_preempt(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 index = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+ if (!amdgpu_sriov_vf(adev) && adev->gpu_preemption) {
+ amdgpu_ring_alloc(ring, 16);
+ amdgpu_ring_set_preempt_cond_exec(ring, true);
+ WREG32(index == 0 ? mmSDMA0_GFX_PREEMPT : mmSDMA1_GFX_PREEMPT, 0x1);
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr + 8, 0xff, AMDGPU_FENCE_FLAG_CLEAR_PREEMPT);
+ amdgpu_ring_commit(ring);
+ }
+}
/**
* sdma_v3_0_gfx_stop - stop the gfx async dma engines
@@ -1660,6 +1673,7 @@ static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
.emit_wreg = sdma_v3_0_ring_emit_wreg,
.init_cond_exec = sdma_v3_0_ring_init_cond_exec,
.patch_cond_exec = sdma_v3_0_ring_patch_cond_exec,
+ .ib_preempt = sdma_v3_0_ring_ib_preempt,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 33bdeeb..f47a3d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -654,6 +654,20 @@ static void sdma_v4_0_ring_patch_cond_exec(struct amdgpu_ring *ring, unsigned of
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
+static void sdma_v4_0_ring_ib_preempt(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 index = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+ if (!amdgpu_sriov_vf(adev) && adev->gpu_preemption) {
+ amdgpu_ring_alloc(ring, 16);
+ amdgpu_ring_set_preempt_cond_exec(ring, true);
+ WREG32(index == 0 ? mmSDMA0_GFX_PREEMPT : mmSDMA1_GFX_PREEMPT, 0x1);
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr + 8, 0xff, AMDGPU_FENCE_FLAG_CLEAR_PREEMPT);
+ amdgpu_ring_commit(ring);
+ }
+}
+
/**
* sdma_v4_0_gfx_stop - stop the gfx async dma engines
*
@@ -1996,6 +2010,7 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.init_cond_exec = sdma_v4_0_ring_init_cond_exec,
.patch_cond_exec = sdma_v4_0_ring_patch_cond_exec,
+ .ib_preempt = sdma_v4_0_ring_ib_preempt,
};
static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
--
1.9.1