[PATCH 8/8] drm/amdgpu/sdma4: Enable sdma poll mem addr on vega10 for SRIOV
Xiangliang.Yu
Xiangliang.Yu at amd.com
Tue Jul 25 09:18:09 UTC 2017
From: Frank Min <Frank.Min at amd.com>
While doing FLR on VFs, the doorbell write for SDMA can be lost, so
enable memory polling for SDMA; the SDMA firmware will then check the
poll memory that holds the wptr.
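For reference, a minimal sketch of the idea (a hypothetical helper, not part of this patch; it assumes the in-tree amdgpu.h context and a per-instance poll memory mapping like the one added below): the wptr is mirrored into the polled memory before the doorbell is rung, so a doorbell write dropped across FLR is still picked up by the firmware's wptr poll.

/*
 * Hypothetical illustration only: publish the wptr to memory that the
 * SDMA firmware polls, then ring the doorbell as usual.  If the doorbell
 * write is lost across an FLR, the next poll still sees the latest wptr.
 */
static void sdma_publish_wptr(struct amdgpu_ring *ring, u64 *poll_mem)
{
        struct amdgpu_device *adev = ring->adev; /* WDOORBELL64() uses adev */

        /* Same byte units as the doorbell value (ring->wptr is in dwords). */
        *poll_mem = ring->wptr << 2;

        /* The doorbell stays the fast path; poll_mem is the recovery path. */
        WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
}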
Signed-off-by: Frank Min <Frank.Min at amd.com>
Signed-off-by: Xiangliang.Yu <Xiangliang.Yu at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +++
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 48 ++++++++++++++++++++++++++++++++--
2 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d287621..79d46fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1208,6 +1208,9 @@ struct amdgpu_sdma_instance {
struct amdgpu_ring ring;
bool burst_nop;
+ struct amdgpu_bo *poll_mem_bo;
+ uint64_t *poll_mem_cpuaddr;
+ uint64_t poll_mem_gpuaddr;
};
struct amdgpu_sdma {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 591f3e7..563be32 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -35,6 +35,7 @@
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/HDP/hdp_4_0_offset.h"
+#include "vega10/NBIO/nbio_6_1_offset.h"
#include "raven1/SDMA0/sdma0_4_1_default.h"
#include "soc15_common.h"
@@ -287,6 +288,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
*/
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
+ int i;
struct amdgpu_device *adev = ring->adev;
DRM_DEBUG("Setting write pointer\n");
@@ -303,6 +305,16 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
+
+ if (amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (&adev->sdma.instance[i].ring == ring) {
+ *adev->sdma.instance[i].poll_mem_cpuaddr = ring->wptr << 2;
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ }
+ }
+ }
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
@@ -573,7 +585,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, wptr_poll_addr_lo, wptr_poll_addr_hi, wptr_poll_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
@@ -687,6 +699,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+
+ if (amdgpu_sriov_vf(adev)) {
+ wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_addr_lo = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO));
+ wptr_poll_addr_lo = REG_SET_FIELD(wptr_poll_addr_lo, SDMA0_GFX_RB_WPTR_POLL_ADDR_LO, ADDR,
+ lower_32_bits(adev->sdma.instance[i].poll_mem_gpuaddr) >> 2);
+ wptr_poll_addr_hi = upper_32_bits(adev->sdma.instance[i].poll_mem_gpuaddr);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), wptr_poll_addr_lo);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), wptr_poll_addr_hi);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ }
}
return 0;
@@ -1247,6 +1272,20 @@ static int sdma_v4_0_sw_init(void *handle)
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_bo_create_kernel(adev, 8, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->sdma.instance[i].poll_mem_bo,
+ &adev->sdma.instance[i].poll_mem_gpuaddr,
+ (void **)&adev->sdma.instance[i].poll_mem_cpuaddr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate SDMA poll mem bo\n", r);
+ return r;
+ }
+ DRM_DEBUG("sdma%d polling memory gpu addr: 0x%llx cpu addr: 0x%p\n", i,
+ adev->sdma.instance[i].poll_mem_gpuaddr,
+ adev->sdma.instance[i].poll_mem_cpuaddr);
+ }
}
@@ -1259,9 +1298,14 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_bo_free_kernel(&adev->sdma.instance[i].poll_mem_bo,
+ &adev->sdma.instance[i].poll_mem_gpuaddr,
+ (void *)&adev->sdma.instance[i].poll_mem_cpuaddr);
+ }
return 0;
}
--
2.7.4