[Intel-xe] [RFC PATCH] drm/xe/uapi: Uniform async vs sync handling

Matthew Brost matthew.brost at intel.com
Wed Dec 6 17:07:29 UTC 2023


Remove the concept of async vs sync VM bind queues and instead make async
vs sync a per-IOCTL choice. Since the choice is per IOCTL, it also makes
sense to use a single IOCTL-level flag rather than a per VM bind op flag.
Add DRM_XE_ZERO_SYNCS_FLAG_WAIT_FOR_OP, an input flag supplied in place of
the syncs array when @num_syncs == 0, to support this. Support the new
flag in both the VM bind IOCTL and the exec IOCTL so the two behave
consistently.
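For illustration, a minimal userspace sketch of requesting synchronous
semantics for a bind with the new flag. This is not part of the patch;
it assumes the usual struct drm_xe_vm_bind / drm_xe_vm_bind_op fields
(vm_id, exec_queue_id, num_binds, bind, obj, range, addr, op) and the
DRM_IOCTL_XE_VM_BIND number from xe_drm.h, plus drmIoctl() from libdrm:

    #include <xf86drm.h>      /* drmIoctl() from libdrm */
    #include <drm/xe_drm.h>   /* xe uapi, including this patch */

    /*
     * Sketch: with num_syncs == 0, pass
     * DRM_XE_ZERO_SYNCS_FLAG_WAIT_FOR_OP in place of the syncs
     * pointer so the IOCTL returns only once the bind has completed.
     */
    struct drm_xe_vm_bind bind = {
            .vm_id = vm_id,           /* from DRM_IOCTL_XE_VM_CREATE */
            .exec_queue_id = 0,       /* default VM bind queue */
            .num_binds = 1,
            .bind = {
                    .obj = bo_handle,
                    .range = size,
                    .addr = gpu_addr,
                    .op = DRM_XE_VM_BIND_OP_MAP,
            },
            .num_syncs = 0,
            .zero_syncs_flags = DRM_XE_ZERO_SYNCS_FLAG_WAIT_FOR_OP,
    };

    if (drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind))
            err(1, "vm bind failed");

The same pattern applies to struct drm_xe_exec, whose syncs pointer gains
the equivalent zero_syncs_flags union member below.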

Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Cc: Francois Dugast <francois.dugast at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
 include/uapi/drm/xe_drm.h | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index eb03a49c17a1..8f4fc08402fd 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -141,8 +141,7 @@ struct drm_xe_engine_class_instance {
 	 * Kernel only classes (not actual hardware engine class). Used for
 	 * creating ordered queues of VM bind operations.
 	 */
-#define DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC	5
-#define DRM_XE_ENGINE_CLASS_VM_BIND_SYNC	6
+#define DRM_XE_ENGINE_CLASS_VM_BIND		5
 	__u16 engine_class;
 
 	__u16 engine_instance;
@@ -660,7 +659,6 @@ struct drm_xe_vm_create {
 	 * still enable recoverable pagefaults if supported by the device.
 	 */
 #define DRM_XE_VM_CREATE_FLAG_LR_MODE	        (1 << 1)
-#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT	(1 << 2)
 	/*
 	 * DRM_XE_VM_CREATE_FLAG_FAULT_MODE requires also
 	 * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated
@@ -668,7 +666,7 @@ struct drm_xe_vm_create {
 	 * The xe driver internally uses recoverable pagefaults to implement
 	 * this.
 	 */
-#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 3)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
 	/** @flags: Flags */
 	__u32 flags;
 
@@ -776,12 +774,11 @@ struct drm_xe_vm_bind_op {
 	__u32 op;
 
 #define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)
-#define DRM_XE_VM_BIND_FLAG_ASYNC	(1 << 1)
 	/*
 	 * Valid on a faulting VM only, do the MAP operation immediately rather
 	 * than deferring the MAP to the page fault handler.
 	 */
-#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 2)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
 	/*
 	 * When the NULL flag is set, the page tables are setup with a special
 	 * bit which indicates writes are dropped and all reads return zero.  In
@@ -789,7 +786,7 @@ struct drm_xe_vm_bind_op {
 	 * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
 	 * intended to implement VK sparse bindings.
 	 */
-#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 3)
+#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
 	/** @flags: Bind flags */
 	__u32 flags;
 
@@ -844,8 +841,14 @@ struct drm_xe_vm_bind {
 	/** @num_syncs: amount of syncs to wait on */
 	__u32 num_syncs;
 
-	/** @syncs: pointer to struct drm_xe_sync array */
-	__u64 syncs;
+	union {
+		/** @syncs: pointer to struct drm_xe_sync array */
+		__u64 syncs;
+
+#define DRM_XE_ZERO_SYNCS_FLAG_WAIT_FOR_OP (1 << 0)
+		/** @zero_syncs_flags: when @num_syncs == 0, flags */
+		__u64 zero_syncs_flags;
+	};
 
 	/** @reserved: Reserved */
 	__u64 reserved[2];
@@ -980,8 +983,12 @@ struct drm_xe_exec {
 	/** @num_syncs: Amount of struct drm_xe_sync in array. */
 	__u32 num_syncs;
 
-	/** @syncs: Pointer to struct drm_xe_sync array. */
-	__u64 syncs;
+	union {
+		/** @syncs: pointer to struct drm_xe_sync array */
+		__u64 syncs;
+		/** @zero_syncs_flags: when @num_syncs == 0, flags */
+		__u64 zero_syncs_flags;
+	};
 
 	/**
 	 * @address: address of batch buffer if num_batch_buffer == 1 or an
-- 
2.34.1


