[PATCH v2 2/3] drm/xe: Use device, gt ordered work queues for resource cleanup
Matthew Brost
matthew.brost at intel.com
Mon Apr 1 22:19:12 UTC 2024
Resource cleanup is a device-private operation with no expectation of
performance. Use the device and GT ordered work queues to clean up
resources, avoiding grabbing locks on the shared system work queues.
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_device_types.h | 5 ++++-
drivers/gpu/drm/xe/xe_execlist.c | 2 +-
drivers/gpu/drm/xe/xe_gt_types.h | 5 ++++-
drivers/gpu/drm/xe/xe_guc_submit.c | 2 +-
drivers/gpu/drm/xe/xe_vm.c | 4 ++--
5 files changed, 12 insertions(+), 6 deletions(-)
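
As background, the pattern used throughout this patch is to allocate a
driver-private ordered workqueue once at init and queue cleanup work on it
instead of system_wq/system_unbound_wq. A minimal sketch, with hypothetical
names (example_device, example_cleanup_work) that are not taken from this
series:

	#include <linux/workqueue.h>

	struct example_device {
		struct workqueue_struct *ordered_wq;
		struct work_struct cleanup_work;
	};

	static void example_cleanup_work(struct work_struct *w)
	{
		/* Sleeping is fine here; items on an ordered wq run one at a time. */
	}

	static int example_device_init(struct example_device *dev)
	{
		/* An ordered workqueue executes at most one work item at any time. */
		dev->ordered_wq = alloc_ordered_workqueue("example-ordered", 0);
		if (!dev->ordered_wq)
			return -ENOMEM;

		INIT_WORK(&dev->cleanup_work, example_cleanup_work);
		return 0;
	}

	static void example_device_teardown(struct example_device *dev)
	{
		/* Queue cleanup on the device-private ordered wq, not a system wq. */
		queue_work(dev->ordered_wq, &dev->cleanup_work);
	}
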
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index c710cec835a7..d696aa2de8cc 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -366,7 +366,10 @@ struct xe_device {
/** @preempt_fence_wq: used to serialize preempt fences */
struct workqueue_struct *preempt_fence_wq;
- /** @ordered_wq: used to serialize compute mode resume */
+ /**
+ * @ordered_wq: used to serialize compute mode resume, cleanup
+ * resources
+ */
struct workqueue_struct *ordered_wq;
/** @unordered_wq: used to serialize unordered work, mostly display */
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index dece2785933c..1ae922509f05 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -393,7 +393,7 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
{
INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
- queue_work(system_unbound_wq, &q->execlist->fini_async);
+ queue_work(q->gt->ordered_wq, &q->execlist->fini_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 2143dffcaf11..cd22ad6e881a 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -268,7 +268,10 @@ struct xe_gt {
} acc_queue[NUM_ACC_QUEUE];
} usm;
- /** @ordered_wq: used to serialize GT resets and TDRs */
+ /**
+ * @ordered_wq: used to serialize GT resets and TDRs, clean up
+ * resources
+ */
struct workqueue_struct *ordered_wq;
/** @uc: micro controllers on the GT */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 13b7e195c7b5..e30ad9fccf6c 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1033,7 +1033,7 @@ static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT)
__guc_exec_queue_fini_async(&q->guc->fini_async);
else
- queue_work(system_wq, &q->guc->fini_async);
+ queue_work(q->gt->ordered_wq, &q->guc->fini_async);
}
static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 8b32aa5003df..7808b540c013 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1005,7 +1005,7 @@ static void vma_destroy_cb(struct dma_fence *fence,
struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
- queue_work(system_unbound_wq, &vma->destroy_work);
+ queue_work(xe_vma_vm(vma)->xe->ordered_wq, &vma->destroy_work);
}
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
@@ -1625,7 +1625,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
/* To destroy the VM we need to be able to sleep */
- queue_work(system_unbound_wq, &vm->destroy_work);
+ queue_work(vm->xe->ordered_wq, &vm->destroy_work);
}
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
--
2.34.1