[PATCH 7/8] drm/amdgpu: add plumbing for ctx priority changes v2

Andres Rodriguez andresx7 at gmail.com
Fri Jul 7 19:26:09 UTC 2017


Introduce amdgpu_ctx_priority_override(), a mechanism to override a
context's priority.

An override can be terminated by setting it to
AMD_SCHED_PRIORITY_UNSET, which reverts the context to its initial
priority.
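
For illustration only (not part of the patch), a caller would use the
new function roughly as sketched below. The helper name and the
AMD_SCHED_PRIORITY_HIGH_HW level are assumptions for the example; only
amdgpu_ctx_priority_override() and AMD_SCHED_PRIORITY_UNSET come from
this patch.

	/* Hypothetical caller: temporarily boost a context, then drop
	 * the override so the context falls back to init_priority. */
	static void example_boost_ctx(struct amdgpu_ctx *ctx)
	{
		/* Move all of the context's entities to the
		 * high-priority runqueues. */
		amdgpu_ctx_priority_override(ctx, AMD_SCHED_PRIORITY_HIGH_HW);

		/* ... submit latency-sensitive work here ... */

		/* Terminate the override; subsequent scheduling uses
		 * the context's initial priority again. */
		amdgpu_ctx_priority_override(ctx, AMD_SCHED_PRIORITY_UNSET);
	}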

v2: replace the refcounted interface with a direct set

Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
Acked-by: Christian König <christian.koenig at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  6 +++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 29 +++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 479a1e9..7251823 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -825,7 +825,9 @@ struct amdgpu_ctx {
 	spinlock_t		ring_lock;
 	struct dma_fence	**fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
-	bool preamble_presented;
+	bool 			preamble_presented;
+	enum amd_sched_priority init_priority;
+	enum amd_sched_priority override_priority;
 };
 
 struct amdgpu_ctx_mgr {
@@ -842,6 +844,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 			      struct dma_fence *fence);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+				  enum amd_sched_priority priority);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e4de0fb..bf05180 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -72,6 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	}
 
 	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+	ctx->init_priority = priority;
+	ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
 
 	/* create context entity for each ring */
 	for (i = 0; i < adev->num_rings; i++) {
@@ -360,6 +362,33 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	return fence;
 }
 
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+				  enum amd_sched_priority priority)
+{
+	int i;
+	struct amdgpu_device *adev = ctx->adev;
+	struct amd_sched_rq *rq;
+	struct amd_sched_entity *entity;
+	struct amdgpu_ring *ring;
+	enum amd_sched_priority ctx_prio;
+
+	ctx->override_priority = priority;
+
+	ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+
+	for (i = 0; i < adev->num_rings; i++) {
+		ring = adev->rings[i];
+		entity = &ctx->rings[i].entity;
+		rq = &ring->sched.sched_rq[ctx_prio];
+
+		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+			continue;
+
+		amd_sched_entity_set_rq(entity, rq);
+	}
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
 	mutex_init(&mgr->lock);
-- 
2.9.3


