[PATCH 3/3] drm/amdgpu: add a mechanism to acquire gpu exclusivity

From: Andres Rodriguez <andresx7 at gmail.com>
Date: Thu May 25 00:01:01 UTC 2017


A DRM_MASTER may wish to restrict gpu job submission to only a limited
set of clients. To enable this use case we provide the following new
IOCTL APIs (a usage sketch follows the list):
  * A mechanism to change a process's ctx priorities
  * A mechanism to raise the minimum priority required for the gpu
    scheduler to queue a job to the HW
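
For illustration only (not part of this patch), a minimal userspace
sketch of the per-process priority override, assuming raw ioctl(2) on
a fd holding DRM_MASTER and the AMDGPU_CTX_PRIORITY_* values
introduced earlier in this series; a real client would likely go
through libdrm instead:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <drm/amdgpu_drm.h>

  /* Override the priority of every context owned by the process
   * behind target_fd. master_fd must hold DRM_MASTER. */
  static int set_process_priority(int master_fd, int target_fd, int prio)
  {
  	union drm_amdgpu_sched args;

  	memset(&args, 0, sizeof(args));	/* in.reserved must be 0 */
  	args.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_SET;
  	args.in.fd = target_fd;
  	args.in.priority = prio;	/* e.g. AMDGPU_CTX_PRIORITY_HIGH_HW */

  	return ioctl(master_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
  }

Note that in.fd identifies the target client's DRM file descriptor
(e.g. received over a Unix socket), while the ioctl itself is issued
on the master's own fd.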

This functionality is useful in VR use cases, where two compositors are
operating simultaneously, e.g. X + SteamVRComposer.

In this case the SteamVRComposer can limit gpu access to itself and the
relevant clients. Once critical work is complete, and if enough time is
available until the next HMD vblank, general access to the gpu can be
restored, as sketched below.
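
The intended flow, again as an illustrative sketch (render_hmd_frame()
is a hypothetical stand-in for the compositor's own submission path):

  extern void render_hmd_frame(void);	/* hypothetical */

  /* Gate the rings to high priority jobs for the critical section,
   * then drop the restriction so other clients can run again. */
  static void submit_critical_work(int master_fd)
  {
  	union drm_amdgpu_sched args;

  	memset(&args, 0, sizeof(args));
  	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH_HW;

  	args.in.op = AMDGPU_SCHED_OP_MIN_PRIORITY_GET;
  	ioctl(master_fd, DRM_IOCTL_AMDGPU_SCHED, &args);

  	render_hmd_frame();

  	args.in.op = AMDGPU_SCHED_OP_MIN_PRIORITY_PUT;
  	ioctl(master_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
  }

The get/put pairing mirrors the refcounted prio_requests[] tracking in
the scheduler, so nested or concurrent restrictions compose correctly.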

The operation is limited to DRM_MASTER since it may lead to starvation
of other clients. Extending DRM_MASTER status to the SteamVRComposer
will require the drm leases work to be implemented.

Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile           |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |   3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |  39 ++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |   2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c     | 131 ++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h     |  34 +++++++
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |  81 ++++++++++++++--
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  14 ++-
 include/uapi/drm/amdgpu_drm.h                 |  26 +++++
 9 files changed, 306 insertions(+), 26 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index b62d9e9..e4d3b07 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
-	amdgpu_queue_mgr.o
+	amdgpu_queue_mgr.o amdgpu_sched.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3722352..9681de7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -833,6 +833,9 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 			      struct dma_fence *fence);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_set_priority(struct amdgpu_device *adev,
+			     struct amdgpu_ctx *ctx,
+			     enum amd_sched_priority priority);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 43fe5ae..996434f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -24,6 +24,7 @@
 
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_sched.h"
 
 static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   enum amd_sched_priority priority,
@@ -198,23 +199,6 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	return 0;
 }
 
-static enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
-{
-	switch (amdgpu_priority) {
-	case AMDGPU_CTX_PRIORITY_HIGH_HW:
-		return AMD_SCHED_PRIORITY_HIGH_HW;
-	case AMDGPU_CTX_PRIORITY_HIGH_SW:
-		return AMD_SCHED_PRIORITY_HIGH_SW;
-	case AMDGPU_CTX_PRIORITY_NORMAL:
-		return AMD_SCHED_PRIORITY_NORMAL;
-	case AMDGPU_CTX_PRIORITY_LOW:
-		return AMD_SCHED_PRIORITY_LOW;
-	default:
-		WARN(1, "Invalid context priority %d\n", amdgpu_priority);
-		return AMD_SCHED_PRIORITY_INVALID;
-	}
-}
-
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
@@ -337,6 +321,27 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	return fence;
 }
 
+void amdgpu_ctx_set_priority(struct amdgpu_device *adev,
+			     struct amdgpu_ctx *ctx,
+			     enum amd_sched_priority priority)
+{
+	int i;
+	struct amd_sched_rq *rq;
+	struct amd_sched_entity *entity;
+	struct amdgpu_ring *ring;
+
+	spin_lock(&ctx->ring_lock);
+	for (i = 0; i < adev->num_rings; i++) {
+		ring = adev->rings[i];
+		entity = &ctx->rings[i].entity;
+		rq = &ring->sched.sched_rq[priority];
+
+		if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
+			amd_sched_entity_set_rq(entity, rq);
+	}
+	spin_unlock(&ctx->ring_lock);
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
 	mutex_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8c26ee1..376851d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -28,6 +28,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include <drm/amdgpu_drm.h>
+#include "amdgpu_sched.h"
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 
@@ -1009,6 +1010,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	/* KMS */
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 0000000..04e2a51
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7 at gmail.com>
+ */
+
+#include <linux/fdtable.h>
+#include <linux/pid.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#include "amdgpu_vm.h"
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+{
+	switch (amdgpu_priority) {
+	case AMDGPU_CTX_PRIORITY_HIGH_HW:
+		return AMD_SCHED_PRIORITY_HIGH_HW;
+	case AMDGPU_CTX_PRIORITY_HIGH_SW:
+		return AMD_SCHED_PRIORITY_HIGH_SW;
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+		return AMD_SCHED_PRIORITY_NORMAL;
+	case AMDGPU_CTX_PRIORITY_LOW:
+		return AMD_SCHED_PRIORITY_LOW;
+	default:
+		WARN(1, "Invalid context priority %d\n", amdgpu_priority);
+		return AMD_SCHED_PRIORITY_INVALID;
+	}
+}
+
+static int amdgpu_sched_process_priority_set(struct amdgpu_device *adev,
+					     int fd,
+					     enum amd_sched_priority priority)
+{
+	struct file *filp = fcheck(fd);
+	struct drm_file *file;
+	struct pid *pid;
+	struct amdgpu_fpriv *fpriv;
+	struct amdgpu_ctx *ctx;
+	uint32_t id;
+
+	if (!filp)
+		return -EINVAL;
+
+	pid = get_pid(((struct drm_file *)filp->private_data)->pid);
+
+	mutex_lock(&adev->ddev->filelist_mutex);
+	list_for_each_entry(file, &adev->ddev->filelist, lhead) {
+		if (file->pid != pid)
+			continue;
+
+		fpriv = file->driver_priv;
+		idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+			amdgpu_ctx_set_priority(adev, ctx, priority);
+	}
+	mutex_unlock(&adev->ddev->filelist_mutex);
+
+	put_pid(pid);
+
+	return 0;
+}
+
+static int amdgpu_sched_min_priority_get(struct amdgpu_device *adev,
+					 enum amd_sched_priority priority)
+{
+	int i;
+
+	for (i = 0; i < adev->num_rings; i++)
+		amd_sched_min_priority_get(&adev->rings[i]->sched, priority);
+
+	return 0;
+}
+
+static int amdgpu_sched_min_priority_put(struct amdgpu_device *adev,
+					 enum amd_sched_priority priority)
+{
+	int i;
+
+	for (i = 0; i < adev->num_rings; i++)
+		amd_sched_min_priority_put(&adev->rings[i]->sched, priority);
+
+	return 0;
+}
+
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *filp)
+{
+	union drm_amdgpu_sched *args = data;
+	struct amdgpu_device *adev = dev->dev_private;
+	enum amd_sched_priority priority;
+	int r;
+
+	priority = amdgpu_to_sched_priority(args->in.priority);
+	if (args->in.reserved || priority == AMD_SCHED_PRIORITY_INVALID)
+		return -EINVAL;
+
+	switch (args->in.op) {
+	case AMDGPU_SCHED_OP_PROCESS_PRIORITY_SET:
+		r = amdgpu_sched_process_priority_set(adev, args->in.fd, priority);
+		break;
+	case AMDGPU_SCHED_OP_MIN_PRIORITY_GET:
+		r = amdgpu_sched_min_priority_get(adev, priority);
+		break;
+	case AMDGPU_SCHED_OP_MIN_PRIORITY_PUT:
+		r = amdgpu_sched_min_priority_put(adev, priority);
+		break;
+	default:
+		r = -EINVAL;
+		break;
+	}
+
+	return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
new file mode 100644
index 0000000..b28c067
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7 at gmail.com>
+ */
+
+#ifndef __AMDGPU_SCHED_H__
+#define __AMDGPU_SCHED_H__
+
+#include <drm/drmP.h>
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *filp);
+
+#endif /* __AMDGPU_SCHED_H__ */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6f..4f2cbe9 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -133,6 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 	entity->rq = rq;
 	entity->sched = sched;
 
+	spin_lock_init(&entity->rq_lock);
 	spin_lock_init(&entity->queue_lock);
 	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
 	if (r)
@@ -204,8 +205,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
-	struct amd_sched_rq *rq = entity->rq;
-
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;
 
@@ -215,7 +214,8 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 	*/
 	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
 
-	amd_sched_rq_remove_entity(rq, entity);
+	amd_sched_entity_set_rq(entity, NULL);
+
 	kfifo_free(&entity->job_queue);
 }
 
@@ -236,6 +236,24 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
 	dma_fence_put(f);
 }
 
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+			     struct amd_sched_rq *rq)
+{
+	if (entity->rq == rq)
+		return;
+
+	spin_lock(&entity->rq_lock);
+
+	if (entity->rq)
+		amd_sched_rq_remove_entity(entity->rq, entity);
+
+	entity->rq = rq;
+	if (rq)
+		amd_sched_rq_add_entity(rq, entity);
+
+	spin_unlock(&entity->rq_lock);
+}
+
 bool amd_sched_dependency_optimized(struct dma_fence* fence,
 				    struct amd_sched_entity *entity)
 {
@@ -333,7 +351,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 	/* first job wakes up scheduler */
 	if (first) {
 		/* Add the entity to the run queue */
+		spin_lock(&entity->rq_lock);
 		amd_sched_rq_add_entity(entity->rq, entity);
+		spin_unlock(&entity->rq_lock);
 		amd_sched_wakeup(sched);
 	}
 	return added;
@@ -523,18 +543,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 static struct amd_sched_entity *
 amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_entity *entity;
-	int i;
+	struct amd_sched_entity *entity = NULL;
+	int i, min_required_prio;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
+	spin_lock(&sched->min_required_prio_lock);
+	min_required_prio = sched->min_required_prio;
+
 	/* Kernel run queue has higher priority than normal run queue*/
-	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= min_required_prio; i--) {
 		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
 		if (entity)
 			break;
 	}
+	spin_unlock(&sched->min_required_prio_lock);
 
 	return entity;
 }
@@ -638,15 +662,19 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
 	sched->timeout = timeout;
-	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
+	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++) {
 		amd_sched_rq_init(&sched->sched_rq[i]);
+		atomic_set(&sched->prio_requests[i], 0);
+	}
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
+	spin_lock_init(&sched->min_required_prio_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	atomic64_set(&sched->job_id_count, 0);
+	sched->min_required_prio = AMD_SCHED_PRIORITY_MIN;
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -668,3 +696,42 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 	if (sched->thread)
 		kthread_stop(sched->thread);
 }
+
+void amd_sched_min_priority_get(struct amd_gpu_scheduler *sched,
+				enum amd_sched_priority priority)
+{
+	atomic_inc(&sched->prio_requests[priority]);
+
+	spin_lock(&sched->min_required_prio_lock);
+	if (priority > sched->min_required_prio)
+		sched->min_required_prio = priority;
+	spin_unlock(&sched->min_required_prio_lock);
+}
+
+void amd_sched_min_priority_put(struct amd_gpu_scheduler *sched,
+				enum amd_sched_priority priority)
+{
+	int i;
+
+	if (atomic_dec_return(&sched->prio_requests[priority]) > 0)
+		return;
+
+	if (priority == AMD_SCHED_PRIORITY_MIN)
+		return;
+
+	spin_lock(&sched->min_required_prio_lock);
+	if (sched->min_required_prio > priority)
+		goto out_unlock;
+
+	for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+		if (i == AMD_SCHED_PRIORITY_MIN
+				|| atomic_read(&sched->prio_requests[i])) {
+			sched->min_required_prio = i;
+			break;
+		}
+	}
+	amd_sched_wakeup(sched);
+
+out_unlock:
+	spin_unlock(&sched->min_required_prio_lock);
+}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index da040bc..c489708 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,9 +38,11 @@ struct amd_sched_rq;
 */
 struct amd_sched_entity {
 	struct list_head		list;
-	struct amd_sched_rq		*rq;
 	struct amd_gpu_scheduler	*sched;
 
+	spinlock_t			rq_lock;
+	struct amd_sched_rq		*rq;
+
 	spinlock_t			queue_lock;
 	struct kfifo                    job_queue;
 
@@ -140,12 +142,20 @@ struct amd_gpu_scheduler {
 	struct task_struct		*thread;
 	struct list_head	ring_mirror_list;
 	spinlock_t			job_list_lock;
+	atomic_t			prio_requests[AMD_SCHED_PRIORITY_MAX];
+	/* minimum priority required for a job to be scheduled */
+	enum amd_sched_priority		min_required_prio;
+	spinlock_t			min_required_prio_lock;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
 		   const struct amd_sched_backend_ops *ops,
 		   uint32_t hw_submission, long timeout, const char *name);
 void amd_sched_fini(struct amd_gpu_scheduler *sched);
+void amd_sched_min_priority_get(struct amd_gpu_scheduler *sched,
+				enum amd_sched_priority priority);
+void amd_sched_min_priority_put(struct amd_gpu_scheduler *sched,
+				enum amd_sched_priority priority);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,
@@ -154,6 +164,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+			     struct amd_sched_rq *rq);
 
 int amd_sched_fence_slab_init(void);
 void amd_sched_fence_slab_fini(void);
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 27d0a822..5afb5a8 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -52,6 +52,7 @@ extern "C" {
 #define DRM_AMDGPU_GEM_USERPTR		0x11
 #define DRM_AMDGPU_WAIT_FENCES		0x12
 #define DRM_AMDGPU_VM			0x13
+#define DRM_AMDGPU_SCHED		0x14
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -67,6 +68,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
 #define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
 #define DRM_IOCTL_AMDGPU_VM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
+#define DRM_IOCTL_AMDGPU_SCHED		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
 
 #define AMDGPU_GEM_DOMAIN_CPU		0x1
 #define AMDGPU_GEM_DOMAIN_GTT		0x2
@@ -219,6 +221,30 @@ union drm_amdgpu_vm {
 	struct drm_amdgpu_vm_out out;
 };
 
+/* sched ioctl */
+#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_SET	1
+#define AMDGPU_SCHED_OP_MIN_PRIORITY_GET	2
+#define AMDGPU_SCHED_OP_MIN_PRIORITY_PUT	3
+
+struct drm_amdgpu_sched_in {
+	/* AMDGPU_SCHED_OP_* */
+	__u32	op;
+	__u32	fd;
+	__s32	priority;
+	/* For future use */
+	__u32	reserved;
+};
+
+struct drm_amdgpu_sched_out {
+	/* For future use */
+	__u64	reserved;
+};
+
+union drm_amdgpu_sched {
+	struct drm_amdgpu_sched_in in;
+	struct drm_amdgpu_sched_out out;
+};
+
 /*
  * This is not a reliable API and you should expect it to fail for any
  * number of reasons and have fallback path that do not use userptr to
-- 
2.9.3


