[PATCH v6 06/13] drm/xe/exec_queue: Prepare last fence for hw engine group resume context
Francois Dugast
francois.dugast at intel.com
Tue Aug 6 19:27:04 UTC 2024
Ensure we can safely take a ref of the exec queue's last fence from the
context of resuming jobs from the hw engine group. The locking requirements
differ from the general case, hence the introduction of
xe_exec_queue_last_fence_{get,put}_for_resume().
v2: Add kernel doc, rework the code to prevent code duplication
v3: Fix kernel doc, remove now unnecessary lockdep variants (Matt Brost)
Signed-off-by: Francois Dugast <francois.dugast at intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue.c | 48 ++++++++++++++++++++++++++++--
drivers/gpu/drm/xe/xe_exec_queue.h | 3 ++
2 files changed, 49 insertions(+), 2 deletions(-)
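For review context, a minimal caller sketch (illustrative only, not part of this
patch): it shows how the resume path of a hw engine group might pair the new
get/put helpers while the group semaphore is held for write. The function name
and the members exec_queue_list, hw_engine_group_link and q->vm are assumptions
made purely for illustration.

static void example_resume_group_queues(struct xe_hw_engine_group *group)
{
	struct xe_exec_queue *q;

	/* Matches the lockdep_assert_held_write() in the new helpers */
	lockdep_assert_held_write(&group->mode_sem);

	/* Hypothetical iteration over the group's exec queues */
	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		/* Takes a ref; returns the stub fence if already signaled */
		struct dma_fence *fence =
			xe_exec_queue_last_fence_get_for_resume(q, q->vm);

		/* ... order the resumed job after this fence ... */

		dma_fence_put(fence);
	}
}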
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 85da2a30a4a4..99ddcc826bce 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -818,10 +818,12 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						     struct xe_vm *vm)
 {
-	if (q->flags & EXEC_QUEUE_FLAG_VM)
+	if (q->flags & EXEC_QUEUE_FLAG_VM) {
 		lockdep_assert_held(&vm->lock);
-	else
+	} else {
 		xe_vm_assert_held(vm);
+		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
+	}
 }
 
 /**
@@ -836,6 +838,21 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
 	xe_exec_queue_last_fence_put_unlocked(q);
 }
 
+/**
+ * xe_exec_queue_last_fence_put_for_resume() - Drop ref to last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Only safe to be called in the context of resuming the hw engine group's
+ * long-running exec queue, when the group semaphore is held.
+ */
+void xe_exec_queue_last_fence_put_for_resume(struct xe_exec_queue *q, struct xe_vm *vm)
+{
+	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
+
+	xe_exec_queue_last_fence_put_unlocked(q);
+}
+
 /**
  * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
  * @q: The exec queue
@@ -875,6 +892,33 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
 	return fence;
 }
 
+/**
+ * xe_exec_queue_last_fence_get_for_resume() - Get last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Get last fence, takes a ref. Only safe to be called in the context of
+ * resuming the hw engine group's long-running exec queue, when the group
+ * semaphore is held.
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
+							   struct xe_vm *vm)
+{
+	struct dma_fence *fence;
+
+	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
+
+	if (q->last_fence &&
+	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
+		xe_exec_queue_last_fence_put_for_resume(q, vm);
+
+	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+	dma_fence_get(fence);
+	return fence;
+}
+
 /**
  * xe_exec_queue_last_fence_set() - Set last fence
  * @q: The exec queue
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index ded77b0f3b90..fc9884a6ce85 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -71,8 +71,11 @@ enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_devi
 void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
+void xe_exec_queue_last_fence_put_for_resume(struct xe_exec_queue *e, struct xe_vm *vm);
 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
 					       struct xe_vm *vm);
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
+							   struct xe_vm *vm);
 void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
 				  struct dma_fence *fence);
 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
--
2.43.0