[Intel-xe] [PATCH v3 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec

Thomas Hellström thomas.hellstrom at linux.intel.com
Thu Aug 31 14:42:37 UTC 2023


On Thu, 2023-08-31 at 11:29 +0200, Thomas Hellström wrote:
> The VM_BIND functionality and vma destruction were locking
> potentially multiple dma_resv objects using the
> ttm_eu_reserve_buffers() function. Rework those paths to use the
> drm_exec helper, taking care that any calls to xe_bo_validate()
> end up inside an unsealed locking transaction.
> 
> Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c | 91 +++++++++++++++-----------------------
>  1 file changed, 36 insertions(+), 55 deletions(-)
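
For anyone reviewing without the rest of the series at hand:
xe_vm_prepare_vma() is the helper added earlier in the series that this
patch builds on. Judging from the call sites below, it has roughly this
shape (a sketch of what I'd expect, not the exact code from that patch):

	int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
			      unsigned int num_shared)
	{
		struct xe_vm *vm = xe_vma_vm(vma);
		struct xe_bo *bo = xe_vma_bo(vma);
		int err;

		/* Lock the VM dma_resv and reserve num_shared fence slots. */
		err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base,
					   num_shared);
		if (err)
			return err;

		/* External BOs carry their own dma_resv; lock those too. */
		if (bo && !bo->vm)
			err = drm_exec_prepare_obj(exec, &bo->ttm.base,
						   num_shared);

		return err;
	}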
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 1547467c7d92..defa4299f172 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1102,27 +1102,21 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
>  
>  static void xe_vma_destroy_unlocked(struct xe_vma *vma)
>  {
> -       struct ttm_validate_buffer tv[2];
> -       struct ww_acquire_ctx ww;
>         struct xe_bo *bo = xe_vma_bo(vma);
> -       LIST_HEAD(objs);
> -       LIST_HEAD(dups);
> +       struct drm_exec exec;
>         int err;
>  
> -       memset(tv, 0, sizeof(tv));
> -       tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
> -       list_add(&tv[0].head, &objs);
> -
> -       if (bo) {
> -               tv[1].bo = &xe_bo_get(bo)->ttm;
> -               list_add(&tv[1].head, &objs);
> +       drm_exec_init(&exec, 0);
> +       drm_exec_until_all_locked(&exec) {
> +               err = xe_vm_prepare_vma(&exec, vma, 0);
> +               drm_exec_retry_on_contention(&exec);
> +               if (XE_WARN_ON(err))
> +                       break;
>         }
> -       err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
> -       XE_WARN_ON(err);
>  
>         xe_vma_destroy(vma, NULL);
>  
> -       ttm_eu_backoff_reservation(&ww, &objs);
> +       drm_exec_fini(&exec);
>         if (bo)
>                 xe_bo_put(bo);

So IGT helped pinpoint that we shouldn't put the bo here: drm_exec
already holds the needed refcount, which is released in
drm_exec_fini(), so the extra xe_bo_put() leads to a use-after-free.
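
I.e. for the next revision, xe_vma_destroy_unlocked() should end up
something like this (sketch):

	static void xe_vma_destroy_unlocked(struct xe_vma *vma)
	{
		struct drm_exec exec;
		int err;

		drm_exec_init(&exec, 0);
		drm_exec_until_all_locked(&exec) {
			err = xe_vm_prepare_vma(&exec, vma, 0);
			drm_exec_retry_on_contention(&exec);
			if (XE_WARN_ON(err))
				break;
		}

		xe_vma_destroy(vma, NULL);

		/*
		 * drm_exec took its own references on the locked objects
		 * and drops them here; no extra xe_bo_put() needed.
		 */
		drm_exec_fini(&exec);
	}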


>  }
> @@ -2138,12 +2132,6 @@ struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
>         return &vm->pt_root[idx]->bo->ttm;
>  }
>  
> -static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
> -{
> -       tv->num_shared = 1;
> -       tv->bo = xe_vm_ttm_bo(vm);
> -}
> -
>  static void vm_set_async_error(struct xe_vm *vm, int err)
>  {
>         lockdep_assert_held(&vm->lock);
> @@ -2644,42 +2632,16 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
>         return err;
>  }
>  
> -static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> -                              struct xe_vma_op *op)
> +static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
> +                     struct xe_vma *vma, struct xe_vma_op *op)
>  {
> -       LIST_HEAD(objs);
> -       LIST_HEAD(dups);
> -       struct ttm_validate_buffer tv_bo, tv_vm;
> -       struct ww_acquire_ctx ww;
> -       struct xe_bo *vbo;
>         int err;
>  
>         lockdep_assert_held_write(&vm->lock);
>  
> -       xe_vm_tv_populate(vm, &tv_vm);
> -       list_add_tail(&tv_vm.head, &objs);
> -       vbo = xe_vma_bo(vma);
> -       if (vbo) {
> -               /*
> -                * An unbind can drop the last reference to the BO
> and
> -                * the BO is needed for ttm_eu_backoff_reservation so
> -                * take a reference here.
> -                */
> -               xe_bo_get(vbo);
> -
> -               if (!vbo->vm) {
> -                       tv_bo.bo = &vbo->ttm;
> -                       tv_bo.num_shared = 1;
> -                       list_add(&tv_bo.head, &objs);
> -               }
> -       }
> -
> -again:
> -       err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
> -       if (err) {
> -               xe_bo_put(vbo);
> +       err = xe_vm_prepare_vma(exec, vma, 1);
> +       if (err)
>                 return err;
> -       }
>  
>         xe_vm_assert_held(vm);
>         xe_bo_assert_held(xe_vma_bo(vma));
> @@ -2758,17 +2720,36 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
>                 XE_WARN_ON("NOT POSSIBLE");
>         }
>  
> -       ttm_eu_backoff_reservation(&ww, &objs);
> +       if (err)
> +               trace_xe_vma_fail(vma);
> +
> +       return err;
> +}
> +
> +static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> +                              struct xe_vma_op *op)
> +{
> +       struct drm_exec exec;
> +       int err;
> +
> +retry_userptr:
> +       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
> +       drm_exec_until_all_locked(&exec) {
> +               err = op_execute(&exec, vm, vma, op);
> +               drm_exec_retry_on_contention(&exec);
> +               if (err)
> +                       break;
> +       }
> +       drm_exec_fini(&exec);
> +
>         if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
>                 lockdep_assert_held_write(&vm->lock);
>                 err = xe_vma_userptr_pin_pages(vma);
>                 if (!err)
> -                       goto again;
> -       }
> -       xe_bo_put(vbo);
> +                       goto retry_userptr;
>  
> -       if (err)
>                 trace_xe_vma_fail(vma);
> +       }
>  
>         return err;
>  }
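
For reference, this is the general shape that replaces a
ttm_eu_reserve_buffers() / ttm_eu_backoff_reservation() pair throughout
(a sketch; "obj" and "num_fences" stand in for whatever the call site
needs):

	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock each object and reserve fence slots on its dma_resv. */
		err = drm_exec_prepare_obj(&exec, obj, num_fences);
		/*
		 * On ww_mutex contention this drops all locks and restarts
		 * the loop body, so everything in here must be safe to
		 * repeat.
		 */
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}

	/* ... work under the locks: validate, bind, install fences ... */

	drm_exec_fini(&exec); /* unlocks and drops the object references */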


