[PATCH v4 3/3] drm/xe/uapi: Support requesting unique MSI-X for exec queue

Ilia Levi <ilia.levi@intel.com>
Sun Nov 10 20:39:15 UTC 2024


From: Dani Liberman <dliberman@habana.ai>

A unique MSI-X vector per exec queue improves the performance of the
IRQ handler. If no MSI-X vector is available, the uAPI returns -EBUSY
and the user can call the uAPI again without the flag to fall back to
the default MSI-X vector.
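
A minimal userspace sketch (not part of this patch) of the
create-and-fallback flow described above; the fd, vm_id, engine
instance and the <drm/xe_drm.h> include path are assumptions made
for illustration only:

  #include <errno.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/xe_drm.h>

  /* Create an exec queue, preferring a dedicated MSI-X vector. */
  static int create_exec_queue(int fd, __u32 vm_id, __u32 *exec_queue_id)
  {
          struct drm_xe_engine_class_instance instance = {
                  .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
                  .engine_instance = 0,
                  .gt_id = 0,
          };
          struct drm_xe_exec_queue_create create = {
                  .width = 1,
                  .num_placements = 1,
                  .vm_id = vm_id,
                  .instances = (__u64)(uintptr_t)&instance,
                  .flags = DRM_XE_EXEC_QUEUE_CREATE_FLAG_UNIQUE_INTERRUPT_HINT,
          };

          /* First try: request a unique MSI-X vector for this queue. */
          if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create)) {
                  if (errno != EBUSY)
                          return -errno;
                  /* No free vector left: retry without the hint. */
                  create.flags = 0;
                  if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
                          return -errno;
          }

          *exec_queue_id = create.exec_queue_id;
          return 0;
  }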

Co-authored-by: Ilia Levi <ilia.levi@intel.com>
Signed-off-by: Dani Liberman <dliberman@habana.ai>
---
 drivers/gpu/drm/xe/xe_exec_queue.c       | 58 ++++++++++++++++++++++--
 drivers/gpu/drm/xe/xe_exec_queue_types.h |  2 +
 drivers/gpu/drm/xe/xe_irq_msix.c         | 26 +++++++++++
 drivers/gpu/drm/xe/xe_irq_msix.h         |  1 +
 include/uapi/drm/xe_drm.h                |  8 +++-
 5 files changed, 89 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index fe3a10825245..f4c9ee87e256 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -12,10 +12,12 @@
 #include <uapi/drm/xe_drm.h>
 
 #include "xe_device.h"
+#include "xe_drv.h"
 #include "xe_gt.h"
 #include "xe_hw_engine_class_sysfs.h"
 #include "xe_hw_engine_group.h"
 #include "xe_hw_fence.h"
+#include "xe_irq_msix.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
 #include "xe_migrate.h"
@@ -34,8 +36,46 @@ enum xe_exec_queue_sched_prop {
 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
 				      u64 extensions, int ext_number);
 
+static int xe_exec_queue_msix_init(struct xe_device *xe, struct xe_exec_queue *q, bool unique_msix)
+{
+	u16 msix;
+	int ret = 0;
+
+	if (!xe_device_has_msix(xe))
+		return 0;
+
+	if (!unique_msix) {
+		q->msix = xe->irq.msix.default_msix;
+		return 0;
+	}
+
+	ret = xe_irq_msix_request_irq(xe, xe_irq_msix_hwe_handler, q,
+				      DRIVER_NAME "-exec-queue", true, &msix);
+	if (ret < 0) {
+		drm_dbg(&xe->drm, "Can't allocate unique MSI-X for exec queue (%d)\n", ret);
+		return ret;
+	}
+
+	q->msix = msix;
+
+	return 0;
+}
+
+static void xe_exec_queue_msix_fini(struct xe_exec_queue *q)
+{
+	struct xe_device *xe = gt_to_xe(q->gt);
+
+	if (!xe_device_has_msix(xe))
+		return;
+
+	if (q->msix && q->msix != xe->irq.msix.default_msix)
+		xe_irq_msix_free_irq(xe, q->msix);
+}
+
 static void __xe_exec_queue_free(struct xe_exec_queue *q)
 {
+	xe_exec_queue_msix_fini(q);
+
 	if (q->vm)
 		xe_vm_put(q->vm);
 
@@ -68,7 +108,12 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
 	q->gt = gt;
 	q->class = hwe->class;
 	q->width = width;
-	q->msix = xe->irq.msix.default_msix;
+	err = xe_exec_queue_msix_init(xe, q, flags & EXEC_QUEUE_FLAG_UNIQUE_MSIX);
+	if (err) {
+		kfree(q);
+		return ERR_PTR(err);
+	}
+
 	q->logical_mask = logical_mask;
 	q->fence_irq = &gt->fence_irq[hwe->class];
 	q->ring_ops = gt->ring_ops[hwe->class];
@@ -542,13 +587,13 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 	struct xe_gt *gt;
 	struct xe_tile *tile;
 	struct xe_exec_queue *q = NULL;
+	u32 flags = 0;
 	u32 logical_mask;
 	u32 id;
 	u32 len;
 	int err;
 
-	if (XE_IOCTL_DBG(xe, args->flags) ||
-	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
 		return -EINVAL;
 
 	len = args->width * args->num_placements;
@@ -564,6 +609,9 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
 		return -EINVAL;
 
+	if (args->flags & DRM_XE_EXEC_QUEUE_CREATE_FLAG_UNIQUE_INTERRUPT_HINT)
+		flags |= EXEC_QUEUE_FLAG_UNIQUE_MSIX;
+
 	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
 		if (XE_IOCTL_DBG(xe, args->width != 1) ||
 		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
@@ -572,8 +620,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 
 		for_each_tile(tile, xe, id) {
 			struct xe_exec_queue *new;
-			u32 flags = EXEC_QUEUE_FLAG_VM;
 
+			flags |= EXEC_QUEUE_FLAG_VM;
 			if (id)
 				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
 
@@ -620,7 +668,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		}
 
 		q = xe_exec_queue_create(xe, vm, logical_mask,
-					 args->width, hwe, 0,
+					 args->width, hwe, flags,
 					 args->extensions);
 		up_read(&vm->lock);
 		xe_vm_put(vm);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 19fd66d59dd7..047a3fae849f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -85,6 +85,8 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(3)
 /* kernel exec_queue only, set priority to highest level */
 #define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(4)
+/* queue with a unique MSI-X interrupt */
+#define EXEC_QUEUE_FLAG_UNIQUE_MSIX		BIT(5)
 
 	/**
 	 * @flags: flags for this exec queue, should statically setup aside from ban
diff --git a/drivers/gpu/drm/xe/xe_irq_msix.c b/drivers/gpu/drm/xe/xe_irq_msix.c
index f873bcfe5283..53d05aafb2a7 100644
--- a/drivers/gpu/drm/xe/xe_irq_msix.c
+++ b/drivers/gpu/drm/xe/xe_irq_msix.c
@@ -9,6 +9,7 @@
 
 #include "xe_device.h"
 #include "xe_drv.h"
+#include "xe_exec_queue_types.h"
 #include "xe_guc.h"
 #include "xe_memirq.h"
 
@@ -67,6 +68,23 @@ static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+/**
+ * xe_irq_msix_hwe_handler - MSI-X IRQ handler for hw engines
+ * @irq: the irq raised
+ * @arg: the exec queue that raised the interrupt
+ *
+ * In MSI-X mode, command streamers raise an interrupt only as a result of
+ * MI_USER_INTERRUPT and MI_FLUSH_DW_NOTIFY commands.
+ */
+irqreturn_t xe_irq_msix_hwe_handler(int irq, void *arg)
+{
+	struct xe_exec_queue *q = arg;
+
+	xe_memirq_hwe_handler(&gt_to_tile(q->hwe->gt)->memirq, q->hwe);
+
+	return IRQ_HANDLED;
+}
+
 static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
 				    bool dynamic_msix, u16 *msix)
 {
@@ -133,6 +151,14 @@ void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
 	int irq;
 	void *irq_buf;
 
+	/*
+	 * When removing the driver, this function can be called twice for each MSI-X vector
+	 * that was allocated for an exec queue: first from irq_uninstall() and then from the
+	 * exec queue free path. Hence, the xarray may already be destroyed or the entry removed.
+	 */
+	if (xa_empty(&xe->irq.msix.indexes))
+		return;
+
 	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
 	if (!irq_buf)
 		return;
diff --git a/drivers/gpu/drm/xe/xe_irq_msix.h b/drivers/gpu/drm/xe/xe_irq_msix.h
index a9c8ce18172e..04619d991b10 100644
--- a/drivers/gpu/drm/xe/xe_irq_msix.h
+++ b/drivers/gpu/drm/xe/xe_irq_msix.h
@@ -17,5 +17,6 @@ void xe_irq_msix_synchronize_irq(struct xe_device *xe);
 int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
 			    const char *name, bool dynamic_msix, u16 *msix);
 void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix);
+irqreturn_t xe_irq_msix_hwe_handler(int irq, void *arg);
 
 #endif
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 4a8a4a63e99c..5f32553b14fb 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -1123,7 +1123,13 @@ struct drm_xe_exec_queue_create {
 	/** @vm_id: VM to use for this exec queue */
 	__u32 vm_id;
 
-	/** @flags: MBZ */
+	/*
+	 * When creating an exec queue on MSI-X platforms, the user can request a unique MSI-X
+	 * interrupt for the IRQ handler. If no MSI-X vector is available, -EBUSY will be returned.
+	 */
+#define	DRM_XE_EXEC_QUEUE_CREATE_FLAG_UNIQUE_INTERRUPT_HINT (0x1 << 0)
+
+	/** @flags: exec queue creation flags */
 	__u32 flags;
 
 	/** @exec_queue_id: Returned exec queue ID */
-- 
2.43.2


