[PATCH 2/3] drm/xe: Manage MSI-X interrupt allocation
Levi, Ilia
ilia.levi at intel.com
Mon Nov 4 14:47:03 UTC 2024
Expose functions to request and free MSI-X interrupts.
The request has two flavors:
- Static MSI-X allocation, for known MSI-X interrupts (e.g. GuC-to-host)
- Dynamic MSI-X allocation, which uses the next available MSI-X interrupt
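A caller-side sketch, for illustration only - the handler, the my_ctx struct
and the "xe-my-irq" name below are hypothetical and not part of this patch;
only the xe_irq_msix_request_irq()/xe_irq_msix_free_irq() calls show the new
API:

    #include <linux/interrupt.h>
    #include "xe_device_types.h"
    #include "xe_irq_msix.h"

    /* Hypothetical per-user context, handed to the request as irq_buf */
    struct my_ctx {
            struct xe_device *xe;
            u16 msix;
    };

    static irqreturn_t my_irq_handler(int irq, void *arg)
    {
            struct my_ctx *ctx = arg;   /* the irq_buf passed at request time */

            /* ... service the interrupt ... */
            return IRQ_HANDLED;
    }

    static int my_enable_irq(struct xe_device *xe, struct my_ctx *ctx)
    {
            int err;

            /* Dynamic flavor: pass true, the allocated vector is returned in ctx->msix */
            err = xe_irq_msix_request_irq(xe, my_irq_handler, ctx,
                                          "xe-my-irq", true, &ctx->msix);
            if (err)
                    return err;

            /*
             * Static flavor: pre-set the vector and pass false instead, e.g.
             *      u16 msix = GUC2HOST_MSIX;
             *      err = xe_irq_msix_request_irq(xe, handler, xe, name, false, &msix);
             */

            return 0;
    }

    static void my_disable_irq(struct xe_device *xe, struct my_ctx *ctx)
    {
            xe_irq_msix_free_irq(xe, ctx->msix);
    }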
Signed-off-by: Ilia Levi <ilia.levi at intel.com>
---
drivers/gpu/drm/xe/xe_device_types.h | 2 +
drivers/gpu/drm/xe/xe_irq_msix.c | 90 ++++++++++++++++++++++------
drivers/gpu/drm/xe/xe_irq_msix.h | 5 ++
3 files changed, 80 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 8237bb1474c6..c7a695741feb 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -355,6 +355,8 @@ struct xe_device {
u16 num_of_interrupts;
/** @irq.msix.default_msix: Default MSI-X vector */
u16 default_msix;
+ /** @irq.msix.indexes: used to allocate MSI-X indexes */
+ struct xarray indexes;
} msix;
} irq;
diff --git a/drivers/gpu/drm/xe/xe_irq_msix.c b/drivers/gpu/drm/xe/xe_irq_msix.c
index dac339ba5ed2..f873bcfe5283 100644
--- a/drivers/gpu/drm/xe/xe_irq_msix.c
+++ b/drivers/gpu/drm/xe/xe_irq_msix.c
@@ -24,6 +24,7 @@ void xe_irq_msix_init(struct xe_device *xe, int nvec)
xe->irq.msix.enabled = true;
xe->irq.msix.num_of_interrupts = nvec;
xe->irq.msix.default_msix = DEFAULT_MSIX;
+ xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
}
static irqreturn_t guc2host_irq_handler(int irq, void *arg)
@@ -66,8 +67,32 @@ static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler,
- const char *name, u16 msix)
+static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
+ bool dynamic_msix, u16 *msix)
+{
+ struct xa_limit limit;
+ int ret;
+ u32 id;
+
+ limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.num_of_interrupts - 1) :
+ XA_LIMIT(*msix, *msix);
+ ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (dynamic_msix)
+ *msix = id;
+
+ return 0;
+}
+
+static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
+{
+ xa_erase(&xe->irq.msix.indexes, msix);
+}
+
+static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
+ void *irq_buf, const char *name, u16 msix)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int ret, irq;
@@ -76,17 +101,41 @@ static int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler,
if (irq < 0)
return irq;
- ret = request_irq(irq, handler, IRQF_SHARED, name, xe);
+ ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
if (ret < 0)
return ret;
return 0;
}
-static void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix)
+{
+ int ret;
+
+ ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
+ if (ret)
+ return ret;
+
+ ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
+ if (ret) {
+ drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
+ xe_irq_msix_release_vector(xe, *msix);
+ return ret;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int irq;
+ void *irq_buf;
+
+ irq_buf = xa_load(&xe->irq.msix.indexes, msix);
+ if (!irq_buf)
+ return;
irq = pci_irq_vector(pdev, msix);
if (irq < 0) {
@@ -94,24 +143,25 @@ static void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
return;
}
- free_irq(irq, xe);
+ free_irq(irq, irq_buf);
+ xe_irq_msix_release_vector(xe, msix);
}
int xe_irq_msix_request(struct xe_device *xe)
{
int err;
+ u16 msix;
- err = xe_irq_msix_request_irq(xe, guc2host_irq_handler,
- DRIVER_NAME "-guc2host", GUC2HOST_MSIX);
- if (err) {
- drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", GUC2HOST_MSIX, err);
+ msix = GUC2HOST_MSIX;
+ err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+ DRIVER_NAME "-guc2host", false, &msix);
+ if (err)
return err;
- }
- err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler,
- DRIVER_NAME "-default-msix", DEFAULT_MSIX);
+ msix = DEFAULT_MSIX;
+ err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
+ DRIVER_NAME "-default-msix", false, &msix);
if (err) {
- drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", DEFAULT_MSIX, err);
xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
return err;
}
@@ -121,14 +171,20 @@ int xe_irq_msix_request(struct xe_device *xe)
void xe_irq_msix_free(struct xe_device *xe)
{
- xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
- xe_irq_msix_free_irq(xe, DEFAULT_MSIX);
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ xe_irq_msix_free_irq(xe, msix);
+ xa_destroy(&xe->irq.msix.indexes);
}
void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned long msix;
+ u32 *dummy;
- synchronize_irq(pci_irq_vector(pdev, GUC2HOST_MSIX));
- synchronize_irq(pci_irq_vector(pdev, DEFAULT_MSIX));
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ synchronize_irq(pci_irq_vector(pdev, msix));
}
diff --git a/drivers/gpu/drm/xe/xe_irq_msix.h b/drivers/gpu/drm/xe/xe_irq_msix.h
index 9c36d11f05fc..a9c8ce18172e 100644
--- a/drivers/gpu/drm/xe/xe_irq_msix.h
+++ b/drivers/gpu/drm/xe/xe_irq_msix.h
@@ -6,11 +6,16 @@
#ifndef _XE_IRQ_MSIX_H_
#define _XE_IRQ_MSIX_H_
+#include <linux/interrupt.h>
+
struct xe_device;
void xe_irq_msix_init(struct xe_device *xe, int nvec);
void xe_irq_msix_free(struct xe_device *xe);
int xe_irq_msix_request(struct xe_device *xe);
void xe_irq_msix_synchronize_irq(struct xe_device *xe);
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix);
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix);
#endif
--
2.43.2