[PATCH v5 7/9] drm/amdgpu: add userq va unmap validation helper

Prike Liang Prike.Liang at amd.com
Fri Jul 4 10:33:06 UTC 2025


Add a helper to validate whether a user queue can be unmapped
before the userq VA GEM unmap is performed. If a matching queue
still has an unsignaled fence, flag it as hung so it will not be
reused.

Signed-off-by: Prike Liang <Prike.Liang at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 78 +++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h |  3 +
 2 files changed, 81 insertions(+)
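
For context, a minimal sketch of how a caller in the GEM VA unmap path might
use this helper. The actual call site is not part of this patch; the
surrounding variables (fpriv, adev, va_address, r) are illustrative
assumptions only:

	/* Hypothetical call site: validate before tearing down the VA mapping.
	 * The helper expects the address as a GPU page number.
	 */
	r = amdgpu_userq_gem_va_unmap_validate(&fpriv->vm,
					       va_address >> AMDGPU_GPU_PAGE_SHIFT);
	if (r == -EBUSY)
		return r;	/* userq_mutex was contended */
	else if (r > 0)
		dev_warn(adev->dev, "%d busy user queue(s) flagged before VA unmap\n", r);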

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 25a35ab7395b..30838e5279bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -1180,3 +1180,81 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
 	mutex_unlock(&adev->userq_mutex);
 	return ret;
 }
+
+static bool amdgpu_userq_gem_va_unmap_queue_retrieve(struct amdgpu_usermode_queue *queue,
+							uint64_t va)
+{
+	/* Rebuild the full, sign-extended GPU VA from the page-based address before comparing. */
+	va = va << AMDGPU_GPU_PAGE_SHIFT | AMDGPU_GMC_HOLE_END;
+
+	switch (queue->queue_type) {
+	case AMDGPU_HW_IP_GFX:
+		if (queue->queue_va == va ||
+		    queue->wptr_va == va ||
+		    queue->rptr_va == va ||
+		    queue->shadow_va == va ||
+		    queue->csa_va == va)
+			return true;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		if (queue->queue_va == va ||
+		    queue->wptr_va == va ||
+		    queue->rptr_va == va ||
+		    queue->eop_va == va)
+			return true;
+		break;
+	case AMDGPU_HW_IP_DMA:
+		if (queue->queue_va == va ||
+		    queue->wptr_va == va ||
+		    queue->rptr_va == va ||
+		    queue->csa_va == va)
+			return true;
+		break;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+/* Flag any still-busy user queue whose VA is being unmapped so it cannot be reused. */
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_vm *vm,
+				uint64_t va)
+{
+	struct amdgpu_fpriv *fpriv = vm_to_fpriv(vm);
+	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+
+	if (&fpriv->vm == vm) {
+		struct amdgpu_usermode_queue *queue;
+		int queue_id, r = 0;
+
+		if (mutex_trylock(&uq_mgr->userq_mutex)) {
+			/* The userq BO may still be busy; deactivate any matching queue so it cannot be reused. */
+			idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+				struct dma_fence *f = queue->last_fence;
+
+				if (!amdgpu_userq_gem_va_unmap_queue_retrieve(queue, va)) {
+					dev_dbg(uq_mgr->adev->dev, "queue(id:%d) does not belong to vm:%p\n",
+						queue_id, vm);
+					continue;
+				}
+
+				if (f && !dma_fence_is_signaled(f)) {
+					dev_warn(uq_mgr->adev->dev, "trying to unmap busy queue(id:%d):%p under vm:%p\n",
+						queue_id, queue, vm);
+					/* Set a state that prevents this queue from being reused. */
+					queue->state = AMDGPU_USERQ_STATE_HUNG;
+					r++;
+				}
+			}
+			mutex_unlock(&uq_mgr->userq_mutex);
+			return r;
+		} else {
+			/* TODO: should we retry the trylock before returning -EBUSY? */
+			return -EBUSY;
+		}
+
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index 194ec7a6b3b2..08c49d738ec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -31,6 +31,7 @@
 #define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
 #define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
 #define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+#define vm_to_fpriv(v)  container_of(v, struct amdgpu_fpriv, vm)
 
 enum amdgpu_userq_state {
 	AMDGPU_USERQ_STATE_UNMAPPED = 0,
@@ -148,4 +149,6 @@ bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
 int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr);
 int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
 			struct amdgpu_usermode_queue *queue);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_vm *vm,
+				uint64_t va);
 #endif
-- 
2.34.1


