[PATCH 13/13] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
Rodrigo Vivi
rodrigo.vivi at intel.com
Mon Dec 11 19:13:06 UTC 2023
Adjust to recent drm-scheduler changes that already landed in drm-next
Cc: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue.c | 6 +++---
drivers/gpu/drm/xe/xe_execlist.c | 3 +--
drivers/gpu/drm/xe/xe_execlist_types.h | 4 ++--
drivers/gpu/drm/xe/xe_gpu_scheduler.h | 2 +-
drivers/gpu/drm/xe/xe_gpu_scheduler_types.h | 6 ++++++
drivers/gpu/drm/xe/xe_guc_submit.c | 10 +++++-----
drivers/gpu/drm/xe/xe_migrate.c | 2 +-
drivers/gpu/drm/xe/xe_sched_job.c | 2 +-
8 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 91d67f4da2cc..3cf7f5d3d5e3 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -222,14 +222,14 @@ struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
enum drm_sched_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
- return capable(CAP_SYS_NICE) ? DRM_SCHED_PRIORITY_HIGH :
- DRM_SCHED_PRIORITY_NORMAL;
+ return capable(CAP_SYS_NICE) ? XE_SCHED_PRIORITY_HIGH :
+ XE_SCHED_PRIORITY_NORMAL;
}
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
- if (XE_IOCTL_DBG(xe, value > DRM_SCHED_PRIORITY_HIGH))
+ if (XE_IOCTL_DBG(xe, value > XE_SCHED_PRIORITY_HIGH))
return -EINVAL;
if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index e8754adfc52a..e6a94d884115 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -344,8 +344,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
goto err_free;
sched = &exl->sched;
- err = drm_sched_entity_init(&exl->entity, DRM_SCHED_PRIORITY_MIN,
- &sched, 1, NULL);
+ err = drm_sched_entity_init(&exl->entity, 0, &sched, 1, NULL);
if (err)
goto err_sched;
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index ee1fccd4ee8b..38bb6f0709c6 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -10,7 +10,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <drm/gpu_scheduler.h>
+#include "xe_gpu_scheduler_types.h"
struct xe_hw_engine;
struct xe_execlist_exec_queue;
@@ -20,7 +20,7 @@ struct xe_execlist_port {
spinlock_t lock;
- struct list_head active[DRM_SCHED_PRIORITY_COUNT];
+ struct list_head active[XE_SCHED_PRIORITY_COUNT];
u32 last_ctx_id;
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index ea785bcd3eb2..10c6bb9c9386 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -63,7 +63,7 @@ static inline int
xe_sched_entity_init(struct xe_sched_entity *entity,
struct xe_gpu_scheduler *sched)
{
- return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_MIN,
+ return drm_sched_entity_init(entity, 0,
(struct drm_gpu_scheduler **)&sched,
1, NULL);
}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
index 86133835d4d1..941a360af2f5 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
@@ -8,6 +8,12 @@
#include <drm/gpu_scheduler.h>
+#define XE_SCHED_PRIORITY_LOW 0
+#define XE_SCHED_PRIORITY_NORMAL 1
+#define XE_SCHED_PRIORITY_HIGH 2
+#define XE_SCHED_PRIORITY_KERNEL 3
+#define XE_SCHED_PRIORITY_COUNT (XE_SCHED_PRIORITY_KERNEL + 1)
+
/**
* struct xe_sched_msg - an in-band (relative to GPU scheduler run queue)
* message
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 46b132ee1d3a..bfa8fb710cdf 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -411,10 +411,10 @@ MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
#undef MAKE_EXEC_QUEUE_POLICY_ADD
static const int xe_sched_prio_to_guc[] = {
- [DRM_SCHED_PRIORITY_MIN] = GUC_CLIENT_PRIORITY_NORMAL,
- [DRM_SCHED_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
- [DRM_SCHED_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
- [DRM_SCHED_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
+ [XE_SCHED_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
+ [XE_SCHED_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
+ [XE_SCHED_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
+ [XE_SCHED_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
};
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -1227,7 +1227,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
err = xe_sched_entity_init(&ge->entity, sched);
if (err)
goto err_sched;
- q->priority = DRM_SCHED_PRIORITY_NORMAL;
+ q->priority = XE_SCHED_PRIORITY_NORMAL;
if (xe_exec_queue_is_lr(q))
INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index be2a92dee52c..1d22aac57478 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -398,7 +398,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
return ERR_CAST(m->q);
}
if (xe->info.has_usm)
- m->q->priority = DRM_SCHED_PRIORITY_KERNEL;
+ m->q->priority = XE_SCHED_PRIORITY_KERNEL;
mutex_init(&m->job_mutex);
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index b467d5bfa4ac..a9c7ae815bec 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -104,7 +104,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
kref_init(&job->refcount);
xe_exec_queue_get(job->q);
- err = drm_sched_job_init(&job->drm, q->entity, NULL);
+ err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
if (err)
goto err_free;
--
2.43.0
More information about the Intel-xe
mailing list