[RFC PATCH 1/2] drm/amdgpu: implement more ib pools
Christian König
christian.koenig at amd.com
Thu Mar 26 11:02:40 UTC 2020
On 26.03.20 10:47, xinhui pan wrote:
> We have three IB pools: the normal, VM and direct pools.
>
> Jobs which schedule IBs without depending on the GPU scheduler should
> use the DIRECT pool.
>
> Jobs which schedule direct VM update IBs should use the VM pool.
>
> All other jobs use the NORMAL pool.
>
> Signed-off-by: xinhui pan <xinhui.pan at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11 ++++++-
> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 +--
> drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 41 ++++++++++++++++++-------
> drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 8 ++++-
> drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 4 ++-
> 5 files changed, 52 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 7dd74253e7b6..3d70c113c205 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -390,6 +390,13 @@ struct amdgpu_sa_bo {
> int amdgpu_fence_slab_init(void);
> void amdgpu_fence_slab_fini(void);
>
> +enum amd_ib_pool_type {
> + AMD_IB_POOL_NORMAL = 0,
> + AMD_IB_POOL_VM,
> + AMD_IB_POOL_DIRECT,
> +
> + AMD_MAX_IB_POOL
> +};
Please use the prefix amdgpu_ib_pool_* here.
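E.g. something like this, just an untested sketch of the naming I have in
mind (the final naming is of course up to you):

enum amdgpu_ib_pool_type {
	/* all other jobs, submitted through the GPU scheduler */
	AMDGPU_IB_POOL_NORMAL = 0,
	/* direct VM page table update IBs */
	AMDGPU_IB_POOL_VM,
	/* IBs scheduled without depending on the GPU scheduler */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};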
> /*
> * IRQS.
> */
> @@ -442,6 +449,8 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
>
> int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> unsigned size, struct amdgpu_ib *ib);
> +int amdgpu_ib_get_with_pool(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> + unsigned size, struct amdgpu_ib *ib, enum amd_ib_pool_type pool);
Don't add a new function, just change the existing amdgpu_ib_get() and
all its callers.
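In other words roughly this prototype (sketch only, using the renamed enum
from above, all callers need to be adjusted as well):

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib,
		  enum amdgpu_ib_pool_type pool_type);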
> void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
> struct dma_fence *f);
> int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
> @@ -848,7 +857,7 @@ struct amdgpu_device {
> unsigned num_rings;
> struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
> bool ib_pool_ready;
> - struct amdgpu_sa_manager ring_tmp_bo;
> + struct amdgpu_sa_manager ring_tmp_bo[AMD_MAX_IB_POOL];
>
> /* interrupts */
> struct amdgpu_irq irq;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 59ec5e2be211..182c7ee439cc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -920,8 +920,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
> parser->entity = entity;
>
> ring = to_amdgpu_ring(entity->rq->sched);
> - r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
> - chunk_ib->ib_bytes : 0, ib);
> + r = amdgpu_ib_get_with_pool(adev, vm, ring->funcs->parse_cs ?
> + chunk_ib->ib_bytes : 0, ib, AMD_IB_POOL_NORMAL);
> if (r) {
> DRM_ERROR("Failed to get ib !\n");
> return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> index bece01f1cf09..460b50a5f875 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> @@ -62,11 +62,17 @@
> */
> int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> unsigned size, struct amdgpu_ib *ib)
> +{
> + return amdgpu_ib_get_with_pool(adev, vm, size, ib, AMD_IB_POOL_DIRECT);
> +}
> +
> +int amdgpu_ib_get_with_pool(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> + unsigned size, struct amdgpu_ib *ib, enum amd_ib_pool_type pool_type)
> {
> int r;
>
> if (size) {
> - r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
> + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo[pool_type],
> &ib->sa_bo, size, 256);
> if (r) {
> dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
> @@ -297,19 +303,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
> */
> int amdgpu_ib_pool_init(struct amdgpu_device *adev)
> {
> - int r;
> + int r, i;
> + unsigned size;
>
> if (adev->ib_pool_ready) {
> return 0;
> }
> - r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
> - AMDGPU_IB_POOL_SIZE*64*1024,
> - AMDGPU_GPU_PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_GTT);
> - if (r) {
> - return r;
> + for (i = 0; i < AMD_MAX_IB_POOL; i++) {
> + if (i == AMD_IB_POOL_DIRECT)
> + size = PAGE_SIZE * 2;
> + else
> + size = AMDGPU_IB_POOL_SIZE*64*1024;
> + r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo[i],
> + size,
> + AMDGPU_GPU_PAGE_SIZE,
> + AMDGPU_GEM_DOMAIN_GTT);
> + if (r) {
> + for (i--; i >= 0; i--)
> + amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
> + return r;
> + }
> }
> -
> adev->ib_pool_ready = true;
>
> return 0;
> @@ -325,8 +339,11 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
> */
> void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
> {
> + int i;
> +
> if (adev->ib_pool_ready) {
> - amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
> + for (i = 0; i < AMD_MAX_IB_POOL; i++)
> + amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
> adev->ib_pool_ready = false;
> }
> }
> @@ -423,7 +440,9 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
> struct drm_device *dev = node->minor->dev;
> struct amdgpu_device *adev = dev->dev_private;
>
> - amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
> + amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMD_IB_POOL_NORMAL], m);
> + amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMD_IB_POOL_VM], m);
> + amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMD_IB_POOL_DIRECT], m);
Maybe add something like seq_printf(m, "-------------------- name of
pool ----------------"); between each call.
>
> return 0;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 4981e443a884..2e98ce568a3f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -88,6 +88,12 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
>
> int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
> struct amdgpu_job **job)
> +{
> + return amdgpu_job_alloc_with_ib_pool(adev, size, job, AMD_IB_POOL_NORMAL);
> +}
> +
> +int amdgpu_job_alloc_with_ib_pool(struct amdgpu_device *adev, unsigned size,
> + struct amdgpu_job **job, enum amd_ib_pool_type pool_type)
> {
> int r;
>
> @@ -95,7 +101,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
> if (r)
> return r;
>
> - r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
> + r = amdgpu_ib_get_with_pool(adev, NULL, size, &(*job)->ibs[0], pool_type);
> if (r)
> kfree(*job);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
> index 2e2110dddb76..c34516bf9278 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
> @@ -38,6 +38,7 @@
> #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
>
> struct amdgpu_fence;
> +enum amd_ib_pool_type;
>
> struct amdgpu_job {
> struct drm_sched_job base;
> @@ -67,7 +68,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
> struct amdgpu_job **job, struct amdgpu_vm *vm);
> int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
> struct amdgpu_job **job);
> -
> +int amdgpu_job_alloc_with_ib_pool(struct amdgpu_device *adev, unsigned size,
> + struct amdgpu_job **job, enum amd_ib_pool_type pool);
Again, just add the new parameter to amdgpu_job_alloc_with_ib() instead
of adding a new function.
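I.e. just extend the existing prototype, something like this (sketch only,
all callers need the extra argument):

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job,
			     enum amdgpu_ib_pool_type pool_type);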
Apart from that looks good to me,
Christian.
> void amdgpu_job_free_resources(struct amdgpu_job *job);
> void amdgpu_job_free(struct amdgpu_job *job);
> int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,