[Intel-xe] [PATCH v3 4/6] drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper
Matthew Brost
matthew.brost at intel.com
Thu Aug 31 17:51:33 UTC 2023
On Thu, Aug 31, 2023 at 11:29:35AM +0200, Thomas Hellström wrote:
> Replace the calls to ttm_eu_reserve_buffers() with the drm_exec
> helper. Also make sure the locking loop covers any calls to
> xe_bo_validate() / ttm_bo_validate(), so that these functions can
> benefit from being called from within an unsealed locking
> transaction and may thus perform blocking dma_resv locks in the future.
>
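For readers not yet familiar with drm_exec, the locking-transaction
pattern this patch converts to looks roughly as follows (a minimal
sketch based on the hunks below, not code from the patch; bo and
num_fences stand in for the driver's objects):

	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock (and optionally validate) objects here. */
		err = drm_exec_prepare_obj(&exec, &bo->ttm.base, num_fences);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	/* ... use the locked objects ... */
	drm_exec_fini(&exec);

On ww-mutex contention, drm_exec_retry_on_contention() has the helper
drop the locks taken so far and rerun the loop body, which is what
makes it safe to move the xe_bo_validate() calls inside the transaction.
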
> For the unlock path, remove the assert that vm->rebind_list is empty
> when locks are released: if the error path is hit with a partly locked
> list, that assert may no longer hold true.
>
> v3:
> - Don't accept duplicate bo locks in the rebind worker.
>
> Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> ---
>  drivers/gpu/drm/xe/Kconfig   |   1 +
>  drivers/gpu/drm/xe/xe_exec.c |  71 +++------
>  drivers/gpu/drm/xe/xe_vm.c   | 280 ++++++++++++++++-------------------
>  drivers/gpu/drm/xe/xe_vm.h   |  22 +--
>  4 files changed, 157 insertions(+), 217 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
> index 0a4ea965645b..096bd066afa8 100644
> --- a/drivers/gpu/drm/xe/Kconfig
> +++ b/drivers/gpu/drm/xe/Kconfig
> @@ -8,6 +8,7 @@ config DRM_XE
> select SHMEM
> select TMPFS
> select DRM_BUDDY
> + select DRM_EXEC
> select DRM_KMS_HELPER
> select DRM_PANEL
> select DRM_SUBALLOC_HELPER
> diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
> index 8a5b614df090..b5058fb8b575 100644
> --- a/drivers/gpu/drm/xe/xe_exec.c
> +++ b/drivers/gpu/drm/xe/xe_exec.c
> @@ -6,6 +6,7 @@
> #include "xe_exec.h"
>
> #include <drm/drm_device.h>
> +#include <drm/drm_exec.h>
> #include <drm/drm_file.h>
> #include <drm/xe_drm.h>
> #include <linux/delay.h>
> @@ -93,25 +94,18 @@
> * Unlock all
> */
>
> -#define XE_EXEC_BIND_RETRY_TIMEOUT_MS 1000
> -
> -static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
> - struct ttm_validate_buffer tv_onstack[],
> - struct ttm_validate_buffer **tv,
> - struct list_head *objs)
> +static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
> {
> - struct xe_vm *vm = q->vm;
> struct xe_vma *vma;
> LIST_HEAD(dups);
> ktime_t end = 0;
> int err = 0;
>
> - *tv = NULL;
> - if (xe_vm_no_dma_fences(q->vm))
> + if (xe_vm_no_dma_fences(vm))
> return 0;
>
> retry:
> - err = xe_vm_lock_dma_resv(vm, ww, tv_onstack, tv, objs, true, 1);
> + err = xe_vm_lock_dma_resv(vm, exec, 1, true);
> if (err)
> return err;
>
> @@ -127,42 +121,16 @@ static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
> continue;
>
> err = xe_bo_validate(xe_vma_bo(vma), vm, false);
> - if (err) {
> - xe_vm_unlock_dma_resv(vm, tv_onstack, *tv, ww, objs);
> - *tv = NULL;
> + if (err)
> break;
> - }
> }
>
> - /*
> - * With multiple active VMs, under memory pressure, it is possible that
> - * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
> - * Until ttm properly handles locking in such scenarios, best thing the
> - * driver can do is retry with a timeout.
> - */
> - if (err == -ENOMEM) {
> - ktime_t cur = ktime_get();
> -
> - end = end ? : ktime_add_ms(cur, XE_EXEC_BIND_RETRY_TIMEOUT_MS);
> - if (ktime_before(cur, end)) {
> - msleep(20);
> - goto retry;
> - }
> - }
> + if (err && xe_vm_validate_should_retry(exec, err, &end))
> + goto retry;
>
> return err;
> }
>
> -static void xe_exec_end(struct xe_exec_queue *q,
> - struct ttm_validate_buffer *tv_onstack,
> - struct ttm_validate_buffer *tv,
> - struct ww_acquire_ctx *ww,
> - struct list_head *objs)
> -{
> - if (!xe_vm_no_dma_fences(q->vm))
> - xe_vm_unlock_dma_resv(q->vm, tv_onstack, tv, ww, objs);
> -}
> -
> int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> {
> struct xe_device *xe = to_xe_device(dev);
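A note for other reviewers: as I read it, two retry levels are stacked
here. drm_exec_retry_on_contention() in the caller restarts the whole
locking transaction when ww-mutex locking hits -EDEADLK, while
xe_exec_begin()'s internal retry: loop handles TTM returning -ENOMEM
under memory pressure via xe_vm_validate_should_retry(), which unlocks
the objects but keeps the ww ticket. Annotated (the caller's code from
the hunk below, with sketch comments added by me):

	drm_exec_until_all_locked(&exec) {	/* restarts on -EDEADLK */
		err = xe_exec_begin(&exec, vm);	/* loops on -ENOMEM inside */
		drm_exec_retry_on_contention(&exec);
		if (err)
			goto err_exec;
	}
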
> @@ -173,14 +141,11 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> struct xe_exec_queue *q;
> struct xe_sync_entry *syncs = NULL;
> u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
> - struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
> - struct ttm_validate_buffer *tv = NULL;
> + struct drm_exec exec;
> u32 i, num_syncs = 0;
> struct xe_sched_job *job;
> struct dma_fence *rebind_fence;
> struct xe_vm *vm;
> - struct ww_acquire_ctx ww;
> - struct list_head objs;
> bool write_locked;
> int err = 0;
>
> @@ -294,26 +259,30 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> goto err_unlock_list;
> }
>
> - err = xe_exec_begin(q, &ww, tv_onstack, &tv, &objs);
> - if (err)
> - goto err_unlock_list;
> + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
> + drm_exec_until_all_locked(&exec) {
> + err = xe_exec_begin(&exec, vm);
> + drm_exec_retry_on_contention(&exec);
> + if (err)
> + goto err_exec;
> + }
>
> if (xe_vm_is_closed_or_banned(q->vm)) {
> drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
> err = -ECANCELED;
> - goto err_exec_queue_end;
> + goto err_exec;
> }
>
> if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
> err = -EWOULDBLOCK;
> - goto err_exec_queue_end;
> + goto err_exec;
> }
>
> job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
> addresses : &args->address);
> if (IS_ERR(job)) {
> err = PTR_ERR(job);
> - goto err_exec_queue_end;
> + goto err_exec;
> }
>
> /*
> @@ -412,8 +381,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> err_put_job:
> if (err)
> xe_sched_job_put(job);
> -err_exec_queue_end:
> - xe_exec_end(q, tv_onstack, tv, &ww, &objs);
> +err_exec:
> + drm_exec_fini(&exec);
> err_unlock_list:
> if (write_locked)
> up_write(&vm->lock);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index ac222cbe78f0..b95a43d0af59 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -7,6 +7,7 @@
>
> #include <linux/dma-fence-array.h>
>
> +#include <drm/drm_exec.h>
> #include <drm/drm_print.h>
> #include <drm/ttm/ttm_execbuf_util.h>
> #include <drm/ttm/ttm_tt.h>
> @@ -322,10 +323,7 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
>
> int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
> {
> - struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
> - struct ttm_validate_buffer *tv;
> - struct ww_acquire_ctx ww;
> - struct list_head objs;
> + struct drm_exec exec;
> struct dma_fence *pfence;
> int err;
> bool wait;
> @@ -333,10 +331,13 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
> XE_WARN_ON(!xe_vm_in_compute_mode(vm));
>
> down_write(&vm->lock);
> -
> - err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
> - if (err)
> - goto out_unlock_outer;
> + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
> + drm_exec_until_all_locked(&exec) {
> + err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
> + drm_exec_retry_on_contention(&exec);
> + if (err)
> + goto out_unlock;
> + }
>
> pfence = xe_preempt_fence_create(q, q->compute.context,
> ++q->compute.seqno);
> @@ -368,8 +369,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
> up_read(&vm->userptr.notifier_lock);
>
> out_unlock:
> - xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
> -out_unlock_outer:
> + drm_exec_fini(&exec);
> up_write(&vm->lock);
>
> return err;
> @@ -398,68 +398,36 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
> * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
> * objects of the vm's external buffer objects.
> * @vm: The vm.
> - * @ww: Pointer to a struct ww_acquire_ctx locking context.
> - * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
> - * ttm_validate_buffers used for locking.
> - * @tv: Pointer to a pointer that on output contains the actual storage used.
> - * @objs: List head for the buffer objects locked.
> - * @intr: Whether to lock interruptible.
> + * @exec: Pointer to a struct drm_exec locking context.
> * @num_shared: Number of dma-fence slots to reserve in the locked objects.
> + * @lock_vm: Lock also the vm's dma_resv.
> *
> * Locks the vm dma-resv objects and all the dma-resv objects of the
> - * buffer objects on the vm external object list. The TTM utilities require
> - * a list of struct ttm_validate_buffers pointing to the actual buffer
> - * objects to lock. Storage for those struct ttm_validate_buffers should
> - * be provided in @tv_onstack, and is typically reserved on the stack
> - * of the caller. If the size of @tv_onstack isn't sufficient, then
> - * storage will be allocated internally using kvmalloc().
> - *
> - * The function performs deadlock handling internally, and after a
> - * successful return the ww locking transaction should be considered
> - * sealed.
> + * buffer objects on the vm external object list.
> *
> * Return: 0 on success, Negative error code on error. In particular if
> - * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
> - * of error, any locking performed has been reverted.
> + * interruptible locking is used, -EINTR or -ERESTARTSYS may be returned.
> */
> -int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
> - struct ttm_validate_buffer *tv_onstack,
> - struct ttm_validate_buffer **tv,
> - struct list_head *objs,
> - bool intr,
> - unsigned int num_shared)
> -{
> - struct ttm_validate_buffer *tv_vm, *tv_bo;
> +int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
> + unsigned int num_shared, bool lock_vm)
> +{
> struct xe_vma *vma, *next;
> - LIST_HEAD(dups);
> - int err;
> + int err = 0;
>
> lockdep_assert_held(&vm->lock);
>
> - if (vm->extobj.entries < XE_ONSTACK_TV) {
> - tv_vm = tv_onstack;
> - } else {
> - tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
> - GFP_KERNEL);
> - if (!tv_vm)
> - return -ENOMEM;
> + if (lock_vm) {
> + err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base,
> + num_shared);
> + if (err)
> + return err;
> }
> - tv_bo = tv_vm + 1;
>
> - INIT_LIST_HEAD(objs);
> list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
> - tv_bo->num_shared = num_shared;
> - tv_bo->bo = &xe_vma_bo(vma)->ttm;
> -
> - list_add_tail(&tv_bo->head, objs);
> - tv_bo++;
> + err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
> + if (err)
> + return err;
> }
> - tv_vm->num_shared = num_shared;
> - tv_vm->bo = xe_vm_ttm_bo(vm);
> - list_add_tail(&tv_vm->head, objs);
> - err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
> - if (err)
> - goto out_err;
>
> spin_lock(&vm->notifier.list_lock);
> list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
> @@ -473,45 +441,7 @@ int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
> }
> spin_unlock(&vm->notifier.list_lock);
>
> - *tv = tv_vm;
> return 0;
> -
> -out_err:
> - if (tv_vm != tv_onstack)
> - kvfree(tv_vm);
> -
> - return err;
> -}
> -
> -/**
> - * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
> - * xe_vm_lock_dma_resv()
> - * @vm: The vm.
> - * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
> - * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
> - * @ww: The ww_acquire_context used for locking.
> - * @objs: The list returned from xe_vm_lock_dma_resv().
> - *
> - * Unlocks the reservation objects and frees any memory allocated by
> - * xe_vm_lock_dma_resv().
> - */
> -void xe_vm_unlock_dma_resv(struct xe_vm *vm,
> - struct ttm_validate_buffer *tv_onstack,
> - struct ttm_validate_buffer *tv,
> - struct ww_acquire_ctx *ww,
> - struct list_head *objs)
> -{
> - /*
> - * Nothing should've been able to enter the list while we were locked,
> - * since we've held the dma-resvs of all the vm's external objects,
> - * and holding the dma_resv of an object is required for list
> - * addition, and we shouldn't add ourselves.
> - */
> - XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
> -
> - ttm_eu_backoff_reservation(ww, objs);
> - if (tv && tv != tv_onstack)
> - kvfree(tv);
> }
>
> #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
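One nice simplification: there is no unlock counterpart anymore, since
drm_exec_fini() unlocks and unreferences everything, so the
tv_onstack/kvfree() bookkeeping disappears. A caller of the reworked
helper now looks roughly like this (a sketch mirroring the
xe_vm_add_compute_exec_queue() hunk above; num_shared is a stand-in):

	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		err = xe_vm_lock_dma_resv(vm, &exec, num_shared, true);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	/* ... work with the locked objects ... */
	drm_exec_fini(&exec);	/* unlocks everything */
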
> @@ -533,18 +463,108 @@ static void xe_vm_kill(struct xe_vm *vm)
> /* TODO: Inform user the VM is banned */
> }
>
> +/**
> + * xe_vm_validate_should_retry() - Whether to retry after a validate error.
> + * @exec: The drm_exec object used for locking before validation.
> + * @err: The error returned from ttm_bo_validate().
> + * @end: A ktime_t cookie that should be set to 0 before first use and
> + * that should be reused on subsequent calls.
> + *
> + * With multiple active VMs, under memory pressure, it is possible that
> + * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
> + * Until ttm properly handles locking in such scenarios, the best thing the
> + * driver can do is retry with a timeout. Check whether that is necessary, and
> + * if so, unlock the drm_exec's objects while keeping the ticket to prepare
> + * for a rerun.
> + *
> + * Return: true if a retry after drm_exec_init() is recommended;
> + * false otherwise.
> + */
> +bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
> +{
> +	struct drm_gem_object *obj;
> +	unsigned long index;
> +	ktime_t cur;
> +
> +	if (err != -ENOMEM)
> +		return false;
> +
> +	cur = ktime_get();
> +	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
> +	if (!ktime_before(cur, *end))
> +		return false;
> +
> +	/*
> +	 * FIXME: Open-code drm_exec_unlock_all().
> +	 * We don't want to release the ww ticket.
> +	 */
> +	drm_exec_for_each_locked_object(exec, index, obj) {
> +		dma_resv_unlock(obj->resv);
> +		drm_gem_object_put(obj);
> +	}
> +	drm_gem_object_put(exec->prelocked);
> +	exec->prelocked = NULL;
> +	exec->num_objects = 0;
> +
> +	msleep(20);
> +	return true;
> +}
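The @end cookie keeps a single overall deadline across repeated calls.
A caller is expected to follow this shape (a sketch of the pattern both
call sites in this patch use; bo, vm and num_fences are stand-ins):

	ktime_t end = 0;
	int err;

retry:
	err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_fences);
	if (!err)
		err = xe_bo_validate(bo, vm, false);
	if (err && xe_vm_validate_should_retry(exec, err, &end))
		goto retry;	/* relock; the ww ticket survived */
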
> +
> +static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
> +				 bool *done)
> +{
> +	struct xe_vma *vma;
> +	ktime_t end = 0;
> +	int err;
> +
> +retry:
> +	err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base,
> +				   vm->preempt.num_exec_queues);
> +	if (err)
> +		return err;
> +
> +	if (xe_vm_is_idle(vm)) {
> +		vm->preempt.rebind_deactivated = true;
> +		*done = true;
> +		return 0;
> +	}
> +
> +	if (!preempt_fences_waiting(vm)) {
> +		*done = true;
> +		return 0;
> +	}
> +
> +	err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
> +	if (err)
> +		return err;
> +
> +	err = wait_for_existing_preempt_fences(vm);
> +	if (err)
> +		return err;
> +
> +	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
> +		if (xe_vma_has_no_bo(vma) ||
> +		    vma->gpuva.flags & XE_VMA_DESTROYED)
> +			continue;
> +
> +		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
> +		if (err)
> +			break;
> +	}
> +
> +	if (err && xe_vm_validate_should_retry(exec, err, &end))
> +		goto retry;
> +
> +	return err;
> +}
> +
> static void preempt_rebind_work_func(struct work_struct *w)
> {
> struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
> - struct xe_vma *vma;
> - struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
> - struct ttm_validate_buffer *tv;
> - struct ww_acquire_ctx ww;
> - struct list_head objs;
> + struct drm_exec exec;
> struct dma_fence *rebind_fence;
> unsigned int fence_count = 0;
> LIST_HEAD(preempt_fences);
> - ktime_t end = 0;
> int err;
> long wait;
> int __maybe_unused tries = 0;
> @@ -583,42 +603,21 @@ static void preempt_rebind_work_func(struct work_struct *w)
> goto out_unlock_outer;
> }
>
> - err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
> - false, vm->preempt.num_exec_queues);
> - if (err)
> - goto out_unlock_outer;
> -
> - if (xe_vm_is_idle(vm)) {
> - vm->preempt.rebind_deactivated = true;
> - goto out_unlock;
> - }
> + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
>
> - /* Fresh preempt fences already installed. Everyting is running. */
> - if (!preempt_fences_waiting(vm))
> - goto out_unlock;
> + drm_exec_until_all_locked(&exec) {
> + bool done = false;
>
> - /*
> - * This makes sure vm is completely suspended and also balances
> - * xe_engine suspend- and resume; we resume *all* vm engines below.
> - */
> - err = wait_for_existing_preempt_fences(vm);
> - if (err)
> - goto out_unlock;
> + err = xe_preempt_work_begin(&exec, vm, &done);
> + drm_exec_retry_on_contention(&exec);
> + if (err || done)
> + goto out_unlock;
> + }
>
> err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
> if (err)
> goto out_unlock;
>
> - list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
> - if (xe_vma_has_no_bo(vma) ||
> - vma->gpuva.flags & XE_VMA_DESTROYED)
> - continue;
> -
> - err = xe_bo_validate(xe_vma_bo(vma), vm, false);
> - if (err)
> - goto out_unlock;
> - }
> -
> rebind_fence = xe_vm_rebind(vm, true);
> if (IS_ERR(rebind_fence)) {
> err = PTR_ERR(rebind_fence);
> @@ -663,30 +662,13 @@ static void preempt_rebind_work_func(struct work_struct *w)
> up_read(&vm->userptr.notifier_lock);
>
> out_unlock:
> - xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
> + drm_exec_fini(&exec);
> out_unlock_outer:
> if (err == -EAGAIN) {
> trace_xe_vm_rebind_worker_retry(vm);
> goto retry;
> }
>
> - /*
> - * With multiple active VMs, under memory pressure, it is possible that
> - * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
> - * Until ttm properly handles locking in such scenarios, best thing the
> - * driver can do is retry with a timeout. Killing the VM or putting it
> - * in error state after timeout or other error scenarios is still TBD.
> - */
> - if (err == -ENOMEM) {
> - ktime_t cur = ktime_get();
> -
> - end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
> - if (ktime_before(cur, end)) {
> - msleep(20);
> - trace_xe_vm_rebind_worker_retry(vm);
> - goto retry;
> - }
> - }
> if (err) {
> drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
> xe_vm_kill(vm);
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index d7d8fd7bd8da..4a1dd11f71c5 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -21,6 +21,7 @@ struct ttm_validate_buffer;
> struct xe_exec_queue;
> struct xe_file;
> struct xe_sync_entry;
> +struct drm_exec;
>
> struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
> void xe_vm_free(struct kref *ref);
> @@ -211,23 +212,10 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma);
>
> int xe_vma_userptr_check_repin(struct xe_vma *vma);
>
> -/*
> - * XE_ONSTACK_TV is used to size the tv_onstack array that is input
> - * to xe_vm_lock_dma_resv() and xe_vm_unlock_dma_resv().
> - */
> -#define XE_ONSTACK_TV 20
> -int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
> - struct ttm_validate_buffer *tv_onstack,
> - struct ttm_validate_buffer **tv,
> - struct list_head *objs,
> - bool intr,
> - unsigned int num_shared);
> -
> -void xe_vm_unlock_dma_resv(struct xe_vm *vm,
> - struct ttm_validate_buffer *tv_onstack,
> - struct ttm_validate_buffer *tv,
> - struct ww_acquire_ctx *ww,
> - struct list_head *objs);
> +bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
> +
> +int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
> + unsigned int num_shared, bool lock_vm);
>
> void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
> enum dma_resv_usage usage);
> --
> 2.41.0
>