[PATCH 13/34] drm/amdgpu: Convert ctx_handles to XArray
Christian König
ckoenig.leichtzumerken at gmail.com
Mon Feb 25 16:07:24 UTC 2019
Am 21.02.19 um 19:41 schrieb Matthew Wilcox:
> Signed-off-by: Matthew Wilcox <willy at infradead.org>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 42 ++++++++---------------
> drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 2 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 4 +--
> 4 files changed, 19 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 6a704aaa7dbe..c2650f143ba7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -164,7 +164,7 @@ extern int amdgpu_si_support;
> extern int amdgpu_cik_support;
> #endif
>
> -#define AMDGPU_VM_MAX_NUM_CTX 4096
> +#define AMDGPU_VM_CTX_LIMIT XA_LIMIT(0, 4095)
IIRC we actually use 0 as a reserved context value in some places.
Christian.
> #define AMDGPU_SG_THRESHOLD (256*1024*1024)
> #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
> #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index d85184b5b35c..bddc28b1c9ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -248,17 +248,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
> return -ENOMEM;
>
> mutex_lock(&mgr->lock);
> - r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
> + r = xa_alloc(&mgr->ctx_handles, id, ctx, AMDGPU_VM_CTX_LIMIT,
> + GFP_KERNEL);
> if (r < 0) {
> mutex_unlock(&mgr->lock);
> kfree(ctx);
> return r;
> }
>
> - *id = (uint32_t)r;
> r = amdgpu_ctx_init(adev, priority, filp, ctx);
> if (r) {
> - idr_remove(&mgr->ctx_handles, *id);
> + xa_erase(&mgr->ctx_handles, *id);
> *id = 0;
> kfree(ctx);
> }
> @@ -290,7 +290,7 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
> struct amdgpu_ctx *ctx;
>
> mutex_lock(&mgr->lock);
> - ctx = idr_remove(&mgr->ctx_handles, id);
> + ctx = xa_erase(&mgr->ctx_handles, id);
> if (ctx)
> kref_put(&ctx->refcount, amdgpu_ctx_do_release);
> mutex_unlock(&mgr->lock);
> @@ -310,7 +310,7 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
>
> mgr = &fpriv->ctx_mgr;
> mutex_lock(&mgr->lock);
> - ctx = idr_find(&mgr->ctx_handles, id);
> + ctx = xa_load(&mgr->ctx_handles, id);
> if (!ctx) {
> mutex_unlock(&mgr->lock);
> return -EINVAL;
> @@ -345,7 +345,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
>
> mgr = &fpriv->ctx_mgr;
> mutex_lock(&mgr->lock);
> - ctx = idr_find(&mgr->ctx_handles, id);
> + ctx = xa_load(&mgr->ctx_handles, id);
> if (!ctx) {
> mutex_unlock(&mgr->lock);
> return -EINVAL;
> @@ -419,7 +419,7 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
> mgr = &fpriv->ctx_mgr;
>
> mutex_lock(&mgr->lock);
> - ctx = idr_find(&mgr->ctx_handles, id);
> + ctx = xa_load(&mgr->ctx_handles, id);
> if (ctx)
> kref_get(&ctx->refcount);
> mutex_unlock(&mgr->lock);
> @@ -533,22 +533,18 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
> void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
> {
> mutex_init(&mgr->lock);
> - idr_init(&mgr->ctx_handles);
> + xa_init_flags(&mgr->ctx_handles, XA_FLAGS_ALLOC1);
> }
>
> void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
> {
> unsigned num_entities = amdgput_ctx_total_num_entities();
> struct amdgpu_ctx *ctx;
> - struct idr *idp;
> - uint32_t id, i;
> + unsigned long id, i;
> long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
>
> - idp = &mgr->ctx_handles;
> -
> mutex_lock(&mgr->lock);
> - idr_for_each_entry(idp, ctx, id) {
> -
> + xa_for_each(&mgr->ctx_handles, id, ctx) {
> if (!ctx->adev) {
> mutex_unlock(&mgr->lock);
> return;
> @@ -568,13 +564,9 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
> {
> unsigned num_entities = amdgput_ctx_total_num_entities();
> struct amdgpu_ctx *ctx;
> - struct idr *idp;
> - uint32_t id, i;
> -
> - idp = &mgr->ctx_handles;
> -
> - idr_for_each_entry(idp, ctx, id) {
> + unsigned long id, i;
>
> + xa_for_each(&mgr->ctx_handles, id, ctx) {
> if (!ctx->adev)
> return;
>
> @@ -591,18 +583,14 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
> void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
> {
> struct amdgpu_ctx *ctx;
> - struct idr *idp;
> - uint32_t id;
> + unsigned long id;
>
> amdgpu_ctx_mgr_entity_fini(mgr);
>
> - idp = &mgr->ctx_handles;
> -
> - idr_for_each_entry(idp, ctx, id) {
> + xa_for_each(&mgr->ctx_handles, id, ctx) {
> if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
> DRM_ERROR("ctx %p is still alive\n", ctx);
> }
> -
> - idr_destroy(&mgr->ctx_handles);
> + xa_destroy(&mgr->ctx_handles);
> mutex_destroy(&mgr->lock);
> }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> index b3b012c0a7da..011b1f15308a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> @@ -55,7 +55,7 @@ struct amdgpu_ctx_mgr {
> struct amdgpu_device *adev;
> struct mutex lock;
> /* protected by lock */
> - struct idr ctx_handles;
> + struct xarray ctx_handles;
> };
>
> extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
> index 1cafe8d83a4d..278b4bd98dcc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
> @@ -57,14 +57,14 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
> struct drm_file *file;
> struct amdgpu_fpriv *fpriv;
> struct amdgpu_ctx *ctx;
> - uint32_t id;
> + unsigned long id;
>
> if (!filp)
> return -EINVAL;
>
> file = filp->private_data;
> fpriv = file->driver_priv;
> - idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
> + xa_for_each(&fpriv->ctx_mgr.ctx_handles, id, ctx)
> amdgpu_ctx_priority_override(ctx, priority);
>
> fput(filp);
More information about the dri-devel
mailing list