[PATCH 13/13] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
Matthew Brost
matthew.brost at intel.com
Tue Dec 12 00:55:46 UTC 2023
On Mon, Dec 11, 2023 at 07:10:45PM -0500, Rodrigo Vivi wrote:
> Adjust to recent drm-scheduler changes that already landed in drm-next
>
> Cc: Matthew Brost <matthew.brost at intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> ---
> drivers/gpu/drm/xe/xe_exec_queue.c | 8 +++----
> drivers/gpu/drm/xe/xe_exec_queue.h | 2 +-
> drivers/gpu/drm/xe/xe_exec_queue_types.h | 14 ++++++++++--
> drivers/gpu/drm/xe/xe_execlist.c | 25 +++++++++------------
> drivers/gpu/drm/xe/xe_execlist_types.h | 6 ++---
> drivers/gpu/drm/xe/xe_gpu_scheduler.h | 2 +-
> drivers/gpu/drm/xe/xe_gpu_scheduler_types.h | 1 -
> drivers/gpu/drm/xe/xe_guc_submit.c | 18 +++++++--------
> drivers/gpu/drm/xe/xe_migrate.c | 2 +-
> drivers/gpu/drm/xe/xe_sched_job.c | 2 +-
> 10 files changed, 43 insertions(+), 37 deletions(-)
>
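Two separate drm-next changes get absorbed here: xe now carries its own
xe_exec_queue_priority enum instead of piggybacking on drm_sched_priority,
and drm_sched_job_init() grew an extra argument (see the note at the
bottom). Going purely by the replacements in the hunks below, the
old-to-new correspondence works out as:

        /*
         * Sketch only, not part of the patch - inferred from the hunks
         * that follow:
         *
         *   DRM_SCHED_PRIORITY_MIN    -> XE_EXEC_QUEUE_PRIORITY_LOW
         *   DRM_SCHED_PRIORITY_NORMAL -> XE_EXEC_QUEUE_PRIORITY_NORMAL
         *   DRM_SCHED_PRIORITY_HIGH   -> XE_EXEC_QUEUE_PRIORITY_HIGH
         *   DRM_SCHED_PRIORITY_KERNEL -> XE_EXEC_QUEUE_PRIORITY_KERNEL
         */
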
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 91d67f4da2cc..85bc25fe99ed 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -219,17 +219,17 @@ struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
> return q;
> }
>
> -enum drm_sched_priority
> +enum xe_exec_queue_priority
> xe_exec_queue_device_get_max_priority(struct xe_device *xe)
> {
> - return capable(CAP_SYS_NICE) ? DRM_SCHED_PRIORITY_HIGH :
> - DRM_SCHED_PRIORITY_NORMAL;
> + return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
> + XE_EXEC_QUEUE_PRIORITY_NORMAL;
> }
>
> static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
> u64 value, bool create)
> {
> - if (XE_IOCTL_DBG(xe, value > DRM_SCHED_PRIORITY_HIGH))
> + if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
> return -EINVAL;
>
> if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
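The uapi-facing policy is unchanged by the switch: anything above HIGH is
rejected, and HIGH itself is still gated on CAP_SYS_NICE through
xe_exec_queue_device_get_max_priority(). Condensed into one hypothetical
helper (not in the patch) just to spell the rule out:

        static bool prio_allowed(struct xe_device *xe, u64 value)
        {
                if (value > XE_EXEC_QUEUE_PRIORITY_HIGH)
                        return false;
                /* HIGH is only reachable with CAP_SYS_NICE */
                return value <= xe_exec_queue_device_get_max_priority(xe);
        }
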
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
> index 8b587d1b2c2c..d959cc4a1a82 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.h
> @@ -57,7 +57,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
> int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
> -enum drm_sched_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
> +enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
>
> void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
> void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> index 5ba47a5cfdbd..6826feb650f3 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> @@ -21,6 +21,16 @@ struct xe_guc_exec_queue;
> struct xe_hw_engine;
> struct xe_vm;
>
> +enum xe_exec_queue_priority {
> + XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
> + XE_EXEC_QUEUE_PRIORITY_LOW = 0,
> + XE_EXEC_QUEUE_PRIORITY_NORMAL,
> + XE_EXEC_QUEUE_PRIORITY_HIGH,
> + XE_EXEC_QUEUE_PRIORITY_KERNEL,
> +
> + XE_EXEC_QUEUE_PRIORITY_COUNT
> +};
> +
> /**
> * struct xe_exec_queue - Execution queue
> *
> @@ -43,7 +53,7 @@ struct xe_exec_queue {
> /** @class: class of this exec queue */
> enum xe_engine_class class;
> /** @priority: priority of this exec queue */
> - enum xe_sched_priority priority;
> + enum xe_exec_queue_priority priority;
> /**
> * @logical_mask: logical mask of where job submitted to exec queue can run
> */
> @@ -182,7 +192,7 @@ struct xe_exec_queue_ops {
> void (*fini)(struct xe_exec_queue *q);
> /** @set_priority: Set priority for exec queue */
> int (*set_priority)(struct xe_exec_queue *q,
> - enum drm_sched_priority priority);
> + enum xe_exec_queue_priority priority);
> /** @set_timeslice: Set timeslice for exec queue */
> int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
> /** @set_preempt_timeout: Set preemption timeout for exec queue */
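Small note for readers of the new enum: XE_EXEC_QUEUE_PRIORITY_UNSET is a
sentinel and must never be used as an array index, while _COUNT exists only
to size the per-port buckets, i.e. (repeating what the execlist hunks below
do):

        struct list_head active[XE_EXEC_QUEUE_PRIORITY_COUNT];

        /* not on any active list yet */
        exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
        ...
        list_add_tail(&exl->active_link, &port->active[priority]);
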
> diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> index e8754adfc52a..d82b50de144e 100644
> --- a/drivers/gpu/drm/xe/xe_execlist.c
> +++ b/drivers/gpu/drm/xe/xe_execlist.c
> @@ -33,8 +33,6 @@
> #define XEHP_SW_CTX_ID_SHIFT 39
> #define XEHP_SW_CTX_ID_WIDTH 16
>
> -#define XE_SCHED_PRIORITY_UNSET -2
> -
> #define SW_CTX_ID \
> GENMASK_ULL(SW_CTX_ID_WIDTH + SW_CTX_ID_SHIFT - 1, \
> SW_CTX_ID_SHIFT)
> @@ -154,7 +152,7 @@ static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
> list_del(&exl->active_link);
>
> if (xe_execlist_is_idle(exl)) {
> - exl->active_priority = XE_SCHED_PRIORITY_UNSET;
> + exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
> continue;
> }
>
> @@ -202,7 +200,7 @@ static void xe_execlist_port_irq_handler(struct xe_hw_engine *hwe,
> }
>
> static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
> - enum drm_sched_priority priority)
> + enum xe_exec_queue_priority priority)
> {
> xe_execlist_port_assert_held(port);
>
> @@ -215,22 +213,22 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
> static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
> {
> struct xe_execlist_port *port = exl->port;
> - enum drm_sched_priority priority = exl->entity.priority;
> + enum xe_exec_queue_priority priority = exl->active_priority;
>
> - XE_WARN_ON(priority == XE_SCHED_PRIORITY_UNSET);
> + XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
> XE_WARN_ON(priority < 0);
> XE_WARN_ON(priority >= ARRAY_SIZE(exl->port->active));
>
> spin_lock_irq(&port->lock);
>
> if (exl->active_priority != priority &&
> - exl->active_priority != XE_SCHED_PRIORITY_UNSET) {
> + exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET) {
> /* Priority changed, move it to the right list */
> list_del(&exl->active_link);
> - exl->active_priority = XE_SCHED_PRIORITY_UNSET;
> + exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
> }
>
> - if (exl->active_priority == XE_SCHED_PRIORITY_UNSET) {
> + if (exl->active_priority == XE_EXEC_QUEUE_PRIORITY_UNSET) {
> exl->active_priority = priority;
> list_add_tail(&exl->active_link, &port->active[priority]);
> }
> @@ -344,14 +342,13 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
> goto err_free;
>
> sched = &exl->sched;
> - err = drm_sched_entity_init(&exl->entity, DRM_SCHED_PRIORITY_MIN,
> - &sched, 1, NULL);
> + err = drm_sched_entity_init(&exl->entity, 0, &sched, 1, NULL);
> if (err)
> goto err_sched;
>
> exl->port = q->hwe->exl_port;
> exl->has_run = false;
> - exl->active_priority = XE_SCHED_PRIORITY_UNSET;
> + exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
> q->execlist = exl;
> q->entity = &exl->entity;
>
> @@ -378,7 +375,7 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
> xe_assert(xe, !xe_device_uc_enabled(xe));
>
> spin_lock_irqsave(&exl->port->lock, flags);
> - if (WARN_ON(exl->active_priority != XE_SCHED_PRIORITY_UNSET))
> + if (WARN_ON(exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET))
> list_del(&exl->active_link);
> spin_unlock_irqrestore(&exl->port->lock, flags);
>
> @@ -403,7 +400,7 @@ static void execlist_exec_queue_fini(struct xe_exec_queue *q)
> }
>
> static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
> - enum drm_sched_priority priority)
> + enum xe_exec_queue_priority priority)
> {
> /* NIY */
> return 0;
> diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
> index ee1fccd4ee8b..f94bbf4c53e4 100644
> --- a/drivers/gpu/drm/xe/xe_execlist_types.h
> +++ b/drivers/gpu/drm/xe/xe_execlist_types.h
> @@ -10,7 +10,7 @@
> #include <linux/spinlock.h>
> #include <linux/workqueue.h>
>
> -#include <drm/gpu_scheduler.h>
> +#include "xe_exec_queue_types.h"
>
> struct xe_hw_engine;
> struct xe_execlist_exec_queue;
> @@ -20,7 +20,7 @@ struct xe_execlist_port {
>
> spinlock_t lock;
>
> - struct list_head active[DRM_SCHED_PRIORITY_COUNT];
> + struct list_head active[XE_EXEC_QUEUE_PRIORITY_COUNT];
>
> u32 last_ctx_id;
>
> @@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
>
> struct work_struct fini_async;
>
> - enum drm_sched_priority active_priority;
> + enum xe_exec_queue_priority active_priority;
> struct list_head active_link;
> };
>
> diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> index ea785bcd3eb2..10c6bb9c9386 100644
> --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> @@ -63,7 +63,7 @@ static inline int
> xe_sched_entity_init(struct xe_sched_entity *entity,
> struct xe_gpu_scheduler *sched)
> {
> - return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_MIN,
> + return drm_sched_entity_init(entity, 0,
> (struct drm_gpu_scheduler **)&sched,
> 1, NULL);
> }
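Passing a literal 0 here (and in the execlist path above) looks fine to me:
xe tracks queue priority on its own now, so the drm entity priority only
picks a run-queue inside our 1:1 scheduler and its value no longer matters
to us. For reference, the entity init prototype is, as I recall (worth
double-checking against drm-next):

        int drm_sched_entity_init(struct drm_sched_entity *entity,
                                  enum drm_sched_priority priority,
                                  struct drm_gpu_scheduler **sched_list,
                                  unsigned int num_sched_list,
                                  atomic_t *guilty);
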
> diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
> index 86133835d4d1..6731b13da8bb 100644
> --- a/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
> +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
> @@ -53,6 +53,5 @@ struct xe_gpu_scheduler {
>
> #define xe_sched_entity drm_sched_entity
> #define xe_sched_policy drm_sched_policy
> -#define xe_sched_priority drm_sched_priority
>
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 46b132ee1d3a..ad5e19ecd33c 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -410,25 +410,25 @@ MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
> MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
> #undef MAKE_EXEC_QUEUE_POLICY_ADD
>
> -static const int xe_sched_prio_to_guc[] = {
> - [DRM_SCHED_PRIORITY_MIN] = GUC_CLIENT_PRIORITY_NORMAL,
> - [DRM_SCHED_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
> - [DRM_SCHED_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
> - [DRM_SCHED_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
> +static const int xe_exec_queue_prio_to_guc[] = {
> + [XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
> + [XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
> + [XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
> + [XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
> };
>
> static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
> {
> struct exec_queue_policy policy;
> struct xe_device *xe = guc_to_xe(guc);
> - enum xe_sched_priority prio = q->priority;
> + enum xe_exec_queue_priority prio = q->priority;
> u32 timeslice_us = q->sched_props.timeslice_us;
> u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
>
> xe_assert(xe, exec_queue_registered(q));
>
> __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
> - __guc_exec_queue_policy_add_priority(&policy, xe_sched_prio_to_guc[prio]);
> + __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
> __guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
> __guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
>
> @@ -1227,7 +1227,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
> err = xe_sched_entity_init(&ge->entity, sched);
> if (err)
> goto err_sched;
> - q->priority = DRM_SCHED_PRIORITY_NORMAL;
> + q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
>
> if (xe_exec_queue_is_lr(q))
> INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
> @@ -1293,7 +1293,7 @@ static void guc_exec_queue_fini(struct xe_exec_queue *q)
> }
>
> static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
> - enum xe_sched_priority priority)
> + enum xe_exec_queue_priority priority)
> {
> struct xe_sched_msg *msg;
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index be2a92dee52c..2ca927f3fb2a 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -398,7 +398,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
> return ERR_CAST(m->q);
> }
> if (xe->info.has_usm)
> - m->q->priority = DRM_SCHED_PRIORITY_KERNEL;
> + m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
>
> mutex_init(&m->job_mutex);
>
> diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
> index b467d5bfa4ac..a9c7ae815bec 100644
> --- a/drivers/gpu/drm/xe/xe_sched_job.c
> +++ b/drivers/gpu/drm/xe/xe_sched_job.c
> @@ -104,7 +104,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
> kref_init(&job->refcount);
> xe_exec_queue_get(job->q);
>
> - err = drm_sched_job_init(&job->drm, q->entity, NULL);
> + err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
> if (err)
> goto err_free;
>
> --
> 2.43.0
>