[PATCH v2 05/12] drm/xe/exec_queue: Prepare last fence for hw engine group resume context
Francois Dugast
francois.dugast at intel.com
Wed Jul 24 19:32:58 UTC 2024
Ensure we can safely take a ref of the exec queue's last fence from the
context of resuming jobs from the hw engine group. In this case, the
lockdep_assert_held*() asserts are used to check that the semaphore is
properly held. This is why we need a new variant of
xe_exec_queue_last_fence_get().
Signed-off-by: Francois Dugast <francois.dugast at intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue.c | 33 ++++++++++++++++++++++++++++--
drivers/gpu/drm/xe/xe_exec_queue.h | 2 ++
2 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 6fc952755cb3..2a817206b91f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -837,10 +837,12 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
struct xe_vm *vm)
{
- if (q->flags & EXEC_QUEUE_FLAG_VM)
+ if (q->flags & EXEC_QUEUE_FLAG_VM) {
lockdep_assert_held(&vm->lock);
- else
+ } else {
xe_vm_assert_held(vm);
+ lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
+ }
}
/**
@@ -897,6 +899,33 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
return fence;
}
+/**
+ * xe_exec_queue_last_fence_get_to_resume() - Get last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Get the last fence and take a ref to it, from the context of resuming
+ * jobs of the hw engine group, where xe_exec_queue_last_fence_lockdep_assert()
+ * is not applicable; rely on the group semaphore being held instead.
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_last_fence_get_to_resume(struct xe_exec_queue *q,
+ struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+
+ lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
+
+ if (q->last_fence &&
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
+ xe_exec_queue_last_fence_put(q, vm);
+
+ fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
+}
+
/**
* xe_exec_queue_last_fence_set() - Set last fence
* @q: The exec queue
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 289a3a51d2a2..b5ac55967f1d 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -73,6 +73,8 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
struct xe_vm *vm);
+struct dma_fence *xe_exec_queue_last_fence_get_to_resume(struct xe_exec_queue *e,
+ struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
struct dma_fence *fence);
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
--
2.43.0
More information about the Intel-xe
mailing list