<div dir="ltr"><div dir="ltr"><br></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Fri, Dec 13, 2019 at 4:07 PM Niranjana Vishwanathapura <<a href="mailto:niranjana.vishwanathapura@intel.com">niranjana.vishwanathapura@intel.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Shared Virtual Memory (SVM) runtime allocator support allows<br>
binding a shared virtual address to a buffer object (BO) in the
device page table through an ioctl call.

Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 drivers/gpu/drm/i915/Kconfig                  | 11 ++++
 drivers/gpu/drm/i915/Makefile                 |  3 +
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 58 ++++++++++++++----
 drivers/gpu/drm/i915/gem/i915_gem_svm.c       | 60 +++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_svm.h       | 22 +++++++
 drivers/gpu/drm/i915/i915_drv.c               | 21 +++++++
 drivers/gpu/drm/i915/i915_drv.h               | 22 +++++++
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  1 +
 drivers/gpu/drm/i915/i915_gem_gtt.h           | 13 ++++
 include/uapi/drm/i915_drm.h                   | 27 +++++++++
 10 files changed, 227 insertions(+), 11 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_svm.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_svm.h
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index ba9595960bbe..c2e48710eec8 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -137,6 +137,16 @@ config DRM_I915_GVT_KVMGT
          Choose this option if you want to enable KVMGT support for
          Intel GVT-g.

+config DRM_I915_SVM
+       bool "Enable Shared Virtual Memory support in i915"
+       depends on STAGING
+       depends on DRM_I915
+       default n
+       help
+         Choose this option if you want Shared Virtual Memory (SVM)
+         support in i915. With SVM support, one can share the virtual
+         address space between a process and the GPU.
+
 menu "drm/i915 Debugging"
 depends on DRM_I915
 depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e0fd10c0cfb8..75fe45633779 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -153,6 +153,9 @@ i915-y += \
          intel_region_lmem.o \
          intel_wopcm.o

+# SVM code
+i915-$(CONFIG_DRM_I915_SVM) += gem/i915_gem_svm.o
+
 # general-purpose microcontroller (GuC) support
 obj-y += gt/uc/
 i915-y += gt/uc/intel_uc.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5003e616a1ad..af360238a392 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2836,10 +2836,14 @@ int
 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file)
 {
+       struct drm_i915_gem_exec_object2 *exec2_list, *exec2_list_user;
        struct drm_i915_gem_execbuffer2 *args = data;
-       struct drm_i915_gem_exec_object2 *exec2_list;
-       struct drm_syncobj **fences = NULL;
        const size_t count = args->buffer_count;
+       struct drm_syncobj **fences = NULL;
+       unsigned int i = 0, svm_count = 0;
+       struct i915_address_space *vm;
+       struct i915_gem_context *ctx;
+       struct i915_svm_obj *svm_obj;
        int err;

        if (!check_buffer_count(count)) {
@@ -2851,15 +2855,46 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
        if (err)
                return err;

+       ctx = i915_gem_context_lookup(file->driver_priv, args->rsvd1);
+       if (!ctx || !rcu_access_pointer(ctx->vm))
+               return -ENOENT;
+
+       rcu_read_lock();
+       vm = i915_vm_get(ctx->vm);
+       rcu_read_unlock();
+
+alloc_again:
+       svm_count = vm->svm_count;
        /* Allocate an extra slot for use by the command parser */
-       exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+       exec2_list = kvmalloc_array(count + svm_count + 1, eb_element_size(),
                                    __GFP_NOWARN | GFP_KERNEL);
        if (exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
-                         count);
+                         count + svm_count);
                return -ENOMEM;
        }
-       if (copy_from_user(exec2_list,
+       mutex_lock(&vm->mutex);
+       if (svm_count != vm->svm_count) {
+               mutex_unlock(&vm->mutex);
+               kvfree(exec2_list);
+               goto alloc_again;
+       }
+
+       list_for_each_entry(svm_obj, &vm->svm_list, link) {
+               memset(&exec2_list[i], 0, sizeof(*exec2_list));
+               exec2_list[i].handle = svm_obj->handle;
+               exec2_list[i].offset = svm_obj->offset;
+               exec2_list[i].flags = EXEC_OBJECT_PINNED |
+                                     EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+               i++;
+       }
+       exec2_list_user = &exec2_list[i];
+       args->buffer_count += svm_count;
+       mutex_unlock(&vm->mutex);
+       i915_vm_put(vm);
+       i915_gem_context_put(ctx);
+
+       if (copy_from_user(exec2_list_user,
                           u64_to_user_ptr(args->buffers_ptr),
                           sizeof(*exec2_list) * count)) {
                DRM_DEBUG("copy %zd exec entries failed\n", count);
@@ -2876,6 +2911,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
        }

        err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
+       args->buffer_count -= svm_count;

        /*
         * Now that we have begun execution of the batchbuffer, we ignore
@@ -2886,7 +2922,6 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
        if (args->flags & __EXEC_HAS_RELOC) {
                struct drm_i915_gem_exec_object2 __user *user_exec_list =
                        u64_to_user_ptr(args->buffers_ptr);
-               unsigned int i;

                /* Copy the new buffer offsets back to the user's exec list. */
                /*
@@ -2900,13 +2935,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
                        goto end;

                for (i = 0; i < args->buffer_count; i++) {
-                       if (!(exec2_list[i].offset & UPDATE))
+                       u64 *offset = &exec2_list_user[i].offset;
+
+                       if (!(*offset & UPDATE))
                                continue;

-                       exec2_list[i].offset =
-                               gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
-                       unsafe_put_user(exec2_list[i].offset,
-                                       &user_exec_list[i].offset,
+                       *offset = gen8_canonical_addr(*offset &
+                                                     PIN_OFFSET_MASK);
+                       unsafe_put_user(*offset, &user_exec_list[i].offset,
                                        end_user);
                }
 end_user:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_svm.c b/drivers/gpu/drm/i915/gem/i915_gem_svm.c
new file mode 100644
index 000000000000..882fe56138e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_svm.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_lmem.h"
+
+int i915_gem_vm_bind_svm_obj(struct i915_address_space *vm,
+                            struct drm_i915_gem_vm_bind *args,
+                            struct drm_file *file)
+{
+       struct i915_svm_obj *svm_obj, *tmp;
+       struct drm_i915_gem_object *obj;
+       int ret = 0;
+
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
+               return -ENOENT;
+
+       /* For dgfx, ensure the obj is in device local memory only */
+       if (IS_DGFX(vm->i915) && !i915_gem_object_is_lmem(obj))
+               return -EINVAL;
+
+       /* FIXME: Need to handle case with unending batch buffers */
+       if (!(args->flags & I915_GEM_VM_BIND_UNBIND)) {
+               svm_obj = kmalloc(sizeof(*svm_obj), GFP_KERNEL);
+               if (!svm_obj) {
+                       ret = -ENOMEM;
+                       goto put_obj;
+               }
+               svm_obj->handle = args->handle;
+               svm_obj->offset = args->start;
+       }
+
+       mutex_lock(&vm->mutex);
+       if (!(args->flags & I915_GEM_VM_BIND_UNBIND)) {
+               list_add(&svm_obj->link, &vm->svm_list);
+               vm->svm_count++;
+       } else {
+               /*
+                * FIXME: Need to handle case where object is migrated/closed
+                * without unbinding first.
+                */
+               list_for_each_entry_safe(svm_obj, tmp, &vm->svm_list, link) {
+                       if (svm_obj->handle != args->handle)
+                               continue;
+
+                       list_del_init(&svm_obj->link);
+                       vm->svm_count--;
+                       kfree(svm_obj);
+                       break;
+               }
+       }
+       mutex_unlock(&vm->mutex);
+put_obj:
+       i915_gem_object_put(obj);
+       return ret;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_svm.h b/drivers/gpu/drm/i915/gem/i915_gem_svm.h
new file mode 100644
index 000000000000..d60b35c7d21a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_svm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_SVM_H
+#define __I915_GEM_SVM_H
+
+#include "i915_drv.h"
+
+#if defined(CONFIG_DRM_I915_SVM)
+int i915_gem_vm_bind_svm_obj(struct i915_address_space *vm,
+                            struct drm_i915_gem_vm_bind *args,
+                            struct drm_file *file);
+#else
+static inline int i915_gem_vm_bind_svm_obj(struct i915_address_space *vm,
+                                          struct drm_i915_gem_vm_bind *args,
+                                          struct drm_file *file)
+{ return -ENOTSUPP; }
+#endif
+
+#endif /* __I915_GEM_SVM_H */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2a11f60c4fd2..d452ea8e40b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2680,6 +2680,26 @@ i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
        return -ENODEV;
 }

+static int i915_gem_vm_bind_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file)
+{
+       struct drm_i915_gem_vm_bind *args = data;
+       struct i915_address_space *vm;
+       int ret = -EINVAL;
+
+       vm = i915_gem_address_space_lookup(file->driver_priv, args->vm_id);
+       if (unlikely(!vm))
+               return -ENOENT;
+
+       switch (args->type) {
+       case I915_GEM_VM_BIND_SVM_OBJ:
+               ret = i915_gem_vm_bind_svm_obj(vm, args, file);
+       }
+
+       i915_vm_put(vm);
+       return ret;
+}
+
 static const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -2739,6 +2759,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_VM_BIND, i915_gem_vm_bind_ioctl, DRM_RENDER_ALLOW),
 };

 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce130e1f1e47..2d0a7cd2dc44 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1909,6 +1909,28 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
        return ctx;
 }

+static inline struct i915_address_space *
+__i915_gem_address_space_lookup_rcu(struct drm_i915_file_private *file_priv,
+                                   u32 id)
+{
+       return idr_find(&file_priv->vm_idr, id);
+}
+
+static inline struct i915_address_space *
+i915_gem_address_space_lookup(struct drm_i915_file_private *file_priv,
+                             u32 id)
+{
+       struct i915_address_space *vm;
+
+       rcu_read_lock();
+       vm = __i915_gem_address_space_lookup_rcu(file_priv, id);
+       if (vm)
+               vm = i915_vm_get(vm);
+       rcu_read_unlock();
+
+       return vm;
+}
+
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
                                          u64 min_size, u64 alignment,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index be36719e7987..7d4f5fa84b02 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -586,6 +586,7 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
        stash_init(&vm->free_pages);

        INIT_LIST_HEAD(&vm->bound_list);
+       INIT_LIST_HEAD(&vm->svm_list);
 }

 static int __setup_page_dma(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 31a4a96ddd0d..7c1b54c9677d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -285,6 +285,13 @@ struct pagestash {
        struct pagevec pvec;
 };

+struct i915_svm_obj {
+       /** This obj's place in the SVM object list */
+       struct list_head link;
+       u32 handle;
+       u64 offset;
+};
+
 struct i915_address_space {
        struct kref ref;
        struct rcu_work rcu;
@@ -329,6 +336,12 @@ struct i915_address_space {
         */
        struct list_head bound_list;

+       /**
+        * List of SVM bind objects.
+        */
+       struct list_head svm_list;
+       unsigned int svm_count;
+
        struct pagestash free_pages;

        /* Global GTT */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 20314eea632a..e10d7bf2cf9f 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -360,6 +360,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_VM_CREATE         0x3a
 #define DRM_I915_GEM_VM_DESTROY                0x3b
 #define DRM_I915_GEM_OBJECT_SETPARAM   DRM_I915_GEM_CONTEXT_SETPARAM
+#define DRM_I915_GEM_VM_BIND           0x3c
 /* Must be kept compact -- no holes */

 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -424,6 +425,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_VM_CREATE   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
 #define DRM_IOCTL_I915_GEM_VM_DESTROY  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
 #define DRM_IOCTL_I915_GEM_OBJECT_SETPARAM     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_OBJECT_SETPARAM, struct drm_i915_gem_object_param)
+#define DRM_IOCTL_I915_GEM_VM_BIND             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)

 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -2300,6 +2302,31 @@ struct drm_i915_query_perf_config {
        __u8 data[];
 };

+/**
+ * struct drm_i915_gem_vm_bind
+ *
+ * Bind an object in a vm's page table.
+ */

First off, this is something I've wanted for a while for Vulkan; it's just never made its way high enough up the priority list. However, it's going to have to come one way or another soon. I'm glad to see a kernel API for this being proposed.

I do, however, have a few high-level comments/questions about the API:

 1. In order to be useful for sparse memory support, the API has to go the other way around, so that it binds a VA range to a range within the BO. It also needs to be able to handle overlap, where two different VA ranges may map to the same underlying bytes in the BO. This likely means that unbind also needs to take a VA range and unbind only that range.

 2. If this is going to be useful for managing GL's address space, where we have lots of BOs, we probably want it to take a list of ranges so we aren't making one ioctl for each thing we want to bind. (A rough sketch of what I mean by 1 and 2 is in the P.S. below.)

 3. Why are there no ways to synchronize this with anything? For binding, this probably isn't really needed as long as the VA range you're binding is empty. However, if you want to move bindings around or unbind something, the only option is to block in userspace and then call bind/unbind. This can be done, but it means even more threads in the UMD, which is unpleasant. One could argue that that's more or less what the kernel is going to have to do anyway, so we may as well do it in userspace. However, I'm not 100% convinced that's true.

--Jason
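P.S. To make 1 and 2 concrete, here's a rough strawman of the sort of interface I have in mind. This is purely illustrative -- every struct and field name below is invented for the sake of example, not a proposal for the actual uapi:

struct drm_i915_gem_vm_bind_range {
       /** VA start of the range to [un]bind */
       __u64 va_start;
       /** Offset into the BO at which the mapping starts */
       __u64 bo_offset;
       /** Length of the range to [un]bind, in bytes */
       __u64 length;
       /**
        * BO handle; nothing prevents two entries from mapping
        * the same bytes of the same BO at two different VAs.
        */
       __u32 handle;
       /** e.g. unbind, read-only */
       __u32 flags;
};

struct drm_i915_gem_vm_bind2 {
       /** vm to [un]bind in */
       __u32 vm_id;
       /** Number of entries in the array at ranges_ptr */
       __u32 range_count;
       /** Pointer to an array of struct drm_i915_gem_vm_bind_range */
       __u64 ranges_ptr;
       /*
        * For 3, one could also imagine in/out sync object handles here
        * so that binds and unbinds can be ordered against GPU work.
        */
};

With something shaped like that, unbind is just another range operation, and a single ioctl can set up or tear down a whole set of sparse bindings at once.
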
+struct drm_i915_gem_vm_bind {
+       /** VA start to bind **/
+       __u64 start;
+
+       /** Type of memory to [un]bind **/
+       __u32 type;
+#define I915_GEM_VM_BIND_SVM_OBJ      0
+
+       /** Object handle to [un]bind for I915_GEM_VM_BIND_SVM_OBJ type **/
+       __u32 handle;
+
+       /** vm to [un]bind **/
+       __u32 vm_id;
+
+       /** Flags **/
+       __u32 flags;
+#define I915_GEM_VM_BIND_UNBIND      (1 << 0)
+#define I915_GEM_VM_BIND_READONLY    (1 << 1)
+};
+
 #if defined(__cplusplus)
 }
 #endif
--
2.21.0.rc0.32.g243a4c7e27