[RFC PATCH v3 08/17] drm/i915/vm_bind: Add out fence support
Niranjana Vishwanathapura
niranjana.vishwanathapura at intel.com
Wed Aug 31 06:22:43 UTC 2022
On Sat, Aug 27, 2022 at 09:43:54PM +0200, Andi Shyti wrote:
>From: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
>
>Add support for handling out fence of vm_bind call.
>
>Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
>Signed-off-by: Ramalingam C <ramalingam.c at intel.com>
>Signed-off-by: Andi Shyti <andi.shyti at linux.intel.com>
>---
> drivers/gpu/drm/i915/gem/i915_gem_vm_bind.h | 3 +
> .../drm/i915/gem/i915_gem_vm_bind_object.c | 82 +++++++++++++++++++
> drivers/gpu/drm/i915/i915_vma.c | 6 +-
> drivers/gpu/drm/i915/i915_vma_types.h | 7 ++
> 4 files changed, 97 insertions(+), 1 deletion(-)
>
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind.h b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind.h
>index ebc493b7dafc1..d65e6e4fb3972 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind.h
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind.h
>@@ -18,4 +18,7 @@ int i915_gem_vm_unbind_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
>
> void i915_gem_vm_unbind_vma_all(struct i915_address_space *vm);
>+void i915_vm_bind_signal_fence(struct i915_vma *vma,
>+ struct dma_fence * const fence);
>+
> #endif /* __I915_GEM_VM_BIND_H */
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
>index 3b45529fe8d4c..e57b9c492a7f9 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
>@@ -5,6 +5,8 @@
>
> #include <linux/interval_tree_generic.h>
>
>+#include <drm/drm_syncobj.h>
>+
> #include "gem/i915_gem_vm_bind.h"
> #include "gem/i915_gem_context.h"
> #include "gt/gen8_engine_cs.h"
>@@ -109,6 +111,67 @@ void i915_gem_vm_bind_remove(struct i915_vma *vma, bool release_obj)
> }
> }
>
>+static int i915_vm_bind_add_fence(struct drm_file *file, struct i915_vma *vma,
>+ u32 handle, u64 point)
>+{
>+ struct drm_syncobj *syncobj;
>+
>+ syncobj = drm_syncobj_find(file, handle);
>+ if (!syncobj) {
>+ DRM_DEBUG("Invalid syncobj handle provided\n");
>+ return -ENOENT;
>+ }
>+
>+ /*
>+ * For timeline syncobjs we need to preallocate chains for
>+ * later signaling.
>+ */
>+ if (point) {
>+ vma->vm_bind_fence.chain_fence = dma_fence_chain_alloc();
>+ if (!vma->vm_bind_fence.chain_fence) {
>+ drm_syncobj_put(syncobj);
>+ return -ENOMEM;
>+ }
>+ } else {
>+ vma->vm_bind_fence.chain_fence = NULL;
>+ }
>+ vma->vm_bind_fence.syncobj = syncobj;
>+ vma->vm_bind_fence.value = point;
>+
>+ return 0;
>+}
>+
>+static void i915_vm_bind_put_fence(struct i915_vma *vma)
>+{
>+ if (!vma->vm_bind_fence.syncobj)
>+ return;
>+
>+ drm_syncobj_put(vma->vm_bind_fence.syncobj);
>+ dma_fence_chain_free(vma->vm_bind_fence.chain_fence);
>+}
>+
>+void i915_vm_bind_signal_fence(struct i915_vma *vma,
>+ struct dma_fence * const fence)
>+{
>+ struct drm_syncobj *syncobj = vma->vm_bind_fence.syncobj;
>+
>+ if (!syncobj)
>+ return;
>+
>+ if (vma->vm_bind_fence.chain_fence) {
>+ drm_syncobj_add_point(syncobj,
>+ vma->vm_bind_fence.chain_fence,
>+ fence, vma->vm_bind_fence.value);
>+ /*
>+ * The chain's ownership is transferred to the
>+ * timeline.
>+ */
>+ vma->vm_bind_fence.chain_fence = NULL;
>+ } else {
>+ drm_syncobj_replace_fence(syncobj, fence);
>+ }
>+}
>+
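For reference, the userspace side of this out fence is the usual drm_syncobj flow.
A rough sketch (not part of this patch; the vm_bind ioctl call itself is elided since
the uapi struct comes from earlier patches in this series, and only the fence.handle /
fence.value / I915_TIMELINE_FENCE_SIGNAL names used above are assumed). point == 0
takes the binary path matching drm_syncobj_replace_fence(), a non-zero point takes the
timeline path matching drm_syncobj_add_point():

	#include <stdint.h>
	#include <xf86drm.h>

	int wait_for_vm_bind(int drm_fd, uint64_t point)
	{
		uint32_t handle;
		int ret;

		ret = drmSyncobjCreate(drm_fd, 0, &handle);
		if (ret)
			return ret;

		/*
		 * Issue the vm_bind ioctl here with fence.handle = handle,
		 * fence.value = point and I915_TIMELINE_FENCE_SIGNAL set
		 * (struct layout defined earlier in the series, elided).
		 */

		if (point)
			/* timeline syncobj: wait for the requested point */
			ret = drmSyncobjTimelineWait(drm_fd, &handle, &point, 1,
						     INT64_MAX, 0, NULL);
		else
			/* binary syncobj: fence attached via replace_fence */
			ret = drmSyncobjWait(drm_fd, &handle, 1,
					     INT64_MAX, 0, NULL);

		drmSyncobjDestroy(drm_fd, handle);
		return ret;
	}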
> static int i915_gem_vm_unbind_vma(struct i915_address_space *vm,
> struct i915_vma *vma,
> struct drm_i915_gem_vm_unbind *va)
>@@ -243,6 +306,15 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
> goto unlock_vm;
> }
>
>+ if (va->fence.flags & I915_TIMELINE_FENCE_SIGNAL) {
>+ ret = i915_vm_bind_add_fence(file, vma, va->fence.handle,
>+ va->fence.value);
>+ if (ret)
>+ goto put_vma;
>+ }
>+
>+ pin_flags = va->start | PIN_OFFSET_FIXED | PIN_USER;
Setting pin_flags should be part of patch #4.
>+
> for_i915_gem_ww(&ww, ret, true) {
> retry:
> ret = i915_gem_object_lock(vma->obj, &ww);
>@@ -267,12 +339,22 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
> ret = i915_gem_ww_ctx_backoff(&ww);
> if (!ret)
> goto retry;
>+
Redundant whitespace. Remove.
> } else {
> /* Hold object reference until vm_unbind */
> i915_gem_object_get(vma->obj);
> }
> }
>
>+ if (va->fence.flags & I915_TIMELINE_FENCE_SIGNAL)
>+ i915_vm_bind_put_fence(vma);
>+
>+put_vma:
>+ if (ret && vma) {
>+ i915_vma_set_freed(vma);
>+ i915_vma_destroy(vma);
>+ }
>+
I think destroying the vma upon error should be part of patch #4.
Niranjana
> unlock_vm:
> mutex_unlock(&vm->vm_bind_lock);
>
>diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
>index 0eb7727d62a6f..6ca37ce2b35a8 100644
>--- a/drivers/gpu/drm/i915/i915_vma.c
>+++ b/drivers/gpu/drm/i915/i915_vma.c
>@@ -1542,8 +1542,12 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
> err_vma_res:
> i915_vma_resource_free(vma_res);
> err_fence:
>- if (work)
>+ if (work) {
>+ if (i915_vma_is_persistent(vma))
>+ i915_vm_bind_signal_fence(vma, &work->base.dma);
>+
> dma_fence_work_commit_imm(&work->base);
>+ }
> err_rpm:
> if (wakeref)
> intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
>diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
>index 5483ccf0c82c7..8bf870a0f689b 100644
>--- a/drivers/gpu/drm/i915/i915_vma_types.h
>+++ b/drivers/gpu/drm/i915/i915_vma_types.h
>@@ -318,6 +318,13 @@ struct i915_vma {
> /* @vm_rebind_link: link to vm_rebind_list and protected by vm_rebind_lock */
> struct list_head vm_rebind_link; /* Link in vm_rebind_list */
>
>+ /** Timeline fence for vm_bind completion notification */
>+ struct {
>+ struct drm_syncobj *syncobj;
>+ u64 value;
>+ struct dma_fence_chain *chain_fence;
>+ } vm_bind_fence;
>+
> /** Interval tree structures for persistent vma */
>
> /** @rb: node for the interval tree of vm for persistent vmas */
>--
>2.34.1
>