[PATCH 2/4] drm/amdgpu: cleanup IB pool handling a bit
Christian König
ckoenig.leichtzumerken at gmail.com
Tue Apr 7 14:27:23 UTC 2020
Fix the coding style, and move and rename the definitions to
better match what they are supposed to do.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11 +---
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 65 +++++++++++----------
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 13 +++++
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 5 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 ++--
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3 +-
10 files changed, 71 insertions(+), 56 deletions(-)
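
A quick illustration (not part of the patch; identifiers are taken from the
hunks below): with the rename, callers pick an IB pool by submission type
instead of the old NORMAL/VM names. A minimal sketch, following the
amdgpu_copy_buffer() hunk:

	/* Direct submission bypasses the scheduler, everything else is delayed. */
	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
							AMDGPU_IB_POOL_DELAYED;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
	if (r)
		return r;

VM updates use AMDGPU_IB_POOL_IMMEDIATE in the same pattern (see the
amdgpu_vm_sdma.c hunks).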
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 32bc2a882fd2..912671f5a665 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -192,8 +192,6 @@ extern int amdgpu_cik_support;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16
@@ -390,13 +388,6 @@ struct amdgpu_sa_bo {
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
-enum amdgpu_ib_pool_type {
- AMDGPU_IB_POOL_NORMAL = 0,
- AMDGPU_IB_POOL_VM,
- AMDGPU_IB_POOL_DIRECT,
-
- AMDGPU_IB_POOL_MAX
-};
/*
* IRQS.
*/
@@ -857,7 +848,7 @@ struct amdgpu_device {
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
- struct amdgpu_sa_manager ring_tmp_bo[AMDGPU_IB_POOL_MAX];
+ struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
/* interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 79dd4ed8146b..1f16b0ff3953 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -921,7 +921,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ring = to_amdgpu_ring(entity->rq->sched);
r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
- chunk_ib->ib_bytes : 0, AMDGPU_IB_POOL_NORMAL, ib);
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 77eaaba927ee..ec2c5e164cd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -61,14 +61,13 @@
* Returns 0 on success, error on failure.
*/
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size,
- enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_ib *ib)
+ unsigned size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_ib *ib)
{
int r;
if (size) {
- r = amdgpu_sa_bo_new(&adev->ring_tmp_bo[pool_type],
+ r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
&ib->sa_bo, size, 256);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -302,30 +301,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
- int r, i;
unsigned size;
+ int r, i;
- if (adev->ib_pool_ready) {
+ if (adev->ib_pool_ready)
return 0;
- }
+
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
if (i == AMDGPU_IB_POOL_DIRECT)
size = PAGE_SIZE * 2;
else
- size = AMDGPU_IB_POOL_SIZE*64*1024;
- r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo[i],
- size,
- AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT);
- if (r) {
- for (i--; i >= 0; i--)
- amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
- return r;
- }
+ size = AMDGPU_IB_POOL_SIZE;
+
+ r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+ size, AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto error;
}
adev->ib_pool_ready = true;
return 0;
+
+error:
+ while (i--)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ return r;
}
/**
@@ -340,11 +341,12 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
int i;
- if (adev->ib_pool_ready) {
- for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
- amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
- adev->ib_pool_ready = false;
- }
+ if (!adev->ib_pool_ready)
+ return;
+
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ adev->ib_pool_ready = false;
}
/**
@@ -359,9 +361,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
*/
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
- unsigned i;
- int r, ret = 0;
long tmo_gfx, tmo_mm;
+ int r, ret = 0;
+ unsigned i;
tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
if (amdgpu_sriov_vf(adev)) {
@@ -439,15 +441,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- seq_printf(m, "-------------------- NORMAL -------------------- \n");
- amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_NORMAL], m);
- seq_printf(m, "---------------------- VM ---------------------- \n");
- amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_VM], m);
- seq_printf(m, "-------------------- DIRECT--------------------- \n");
- amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_DIRECT], m);
+ seq_printf(m, "--------------------- DELAYED --------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+ m);
+ seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+ m);
+ seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
return 0;
-
}
static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bfd634aedf26..efd7627b3f69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -50,6 +50,8 @@
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
+
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX,
AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE,
@@ -63,6 +65,17 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_KIQ
};
+enum amdgpu_ib_pool_type {
+ /* Normal submissions to the top of the pipeline. */
+ AMDGPU_IB_POOL_DELAYED,
+ /* Immediate submissions to the bottom of the pipeline. */
+ AMDGPU_IB_POOL_IMMEDIATE,
+ /* Direct submission to the ring buffer during init and reset. */
+ AMDGPU_IB_POOL_DIRECT,
+
+ AMDGPU_IB_POOL_MAX
+};
+
struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 476f1f89aaad..2f4d5ca9894f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+ n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i])
n -= adev->rings[i]->ring_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1b658e905620..153af5bdbec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -337,7 +337,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
num_bytes = num_pages * 8;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_NORMAL, &job);
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
@@ -2127,6 +2127,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush, bool tmz)
{
+ enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -2144,8 +2146,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
- direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
if (r)
return r;
@@ -2234,7 +2235,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
/* for IB padding */
num_dw += 64;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 550282d9c1fc..5100ebe8858d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1056,8 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err;
}
- r = amdgpu_job_alloc_with_ib(adev, 64,
- direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d090455282e5..ecaa2d7483b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -447,7 +447,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -526,7 +526,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+ direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index fbd451f3559a..b96c8d9a1946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -61,11 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
+ enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
+ AMDGPU_IB_POOL_DELAYED;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
- p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
if (r)
return r;
@@ -199,6 +200,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
+ enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
+ AMDGPU_IB_POOL_DELAYED;
unsigned int i, ndw, nptes;
uint64_t *pte;
int r;
@@ -224,8 +227,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
- p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+ &p->job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 78d769e13643..0655cd4f77a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -369,7 +369,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+ &job);
if (r)
goto error_alloc;
--
2.17.1