[Intel-xe] [PATCH 1/2] drm/xe: Deprecate XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE implementation
Niranjana Vishwanathapura
niranjana.vishwanathapura at intel.com
Wed Sep 13 22:04:33 UTC 2023
On Wed, Sep 13, 2023 at 02:34:04PM -0700, Niranjana Vishwanathapura wrote:
>On Tue, Sep 12, 2023 at 04:51:58PM -0700, Matthew Brost wrote:
>>We are going to remove XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE from the
>>uAPI; deprecate the implementation first by making
>>XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE a NOP.
>>
>
>So, only make it a creation-time parameter, right?
>Maybe mention that in the commit text here?
I meant: fix it at creation time, based on whether the VM is in compute mode.
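To spell out the intent (this just mirrors the create ioctl hunk further down,
so take it as a sketch rather than new code):

	/* in xe_exec_queue_create_ioctl(), after the queue is created */
	if (xe_vm_in_compute_mode(vm)) {
		/* compute mode is derived from the VM, not from a user property */
		q->compute.context = dma_fence_context_alloc(1);
		spin_lock_init(&q->compute.lock);

		err = xe_vm_add_compute_exec_queue(vm, q);
		if (XE_IOCTL_DBG(xe, err))
			goto put_exec_queue;
	}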
>
>>Signed-off-by: Matthew Brost <matthew.brost at intel.com>
>>---
>>drivers/gpu/drm/xe/xe_exec_queue.c | 57 ++++++------------------
>>drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 +--
>>2 files changed, 16 insertions(+), 47 deletions(-)
>>
>>diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
>>index 6725157d8c1d..ac392bcf7bc8 100644
>>--- a/drivers/gpu/drm/xe/xe_exec_queue.c
>>+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
>>@@ -323,39 +323,6 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe,
>>static int exec_queue_set_compute_mode(struct xe_device *xe, struct xe_exec_queue *q,
>> u64 value, bool create)
>>{
>>- if (XE_IOCTL_DBG(xe, !create))
>>- return -EINVAL;
>>-
>>- if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
>>- return -EINVAL;
>>-
>>- if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
>>- return -EINVAL;
>>-
>>- if (value) {
>>- struct xe_vm *vm = q->vm;
>>- int err;
>>-
>>- if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
>>- return -EOPNOTSUPP;
>>-
>>- if (XE_IOCTL_DBG(xe, !xe_vm_in_compute_mode(vm)))
>>- return -EOPNOTSUPP;
>>-
>>- if (XE_IOCTL_DBG(xe, q->width != 1))
>>- return -EINVAL;
>>-
>>- q->compute.context = dma_fence_context_alloc(1);
>>- spin_lock_init(&q->compute.lock);
>>-
>>- err = xe_vm_add_compute_exec_queue(vm, q);
>>- if (XE_IOCTL_DBG(xe, err))
>>- return err;
>>-
>>- q->flags |= EXEC_QUEUE_FLAG_COMPUTE_MODE;
>>- q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
>>- }
>>-
>> return 0;
>>}
>>
>>@@ -365,7 +332,7 @@ static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue
>> if (XE_IOCTL_DBG(xe, !create))
>> return -EINVAL;
>>
>>- if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
>>+ if (XE_IOCTL_DBG(xe, xe_vm_in_compute_mode(q->vm)))
>> return -EINVAL;
>>
>> if (value)
>>@@ -742,18 +709,21 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
>> xe_vm_put(vm);
>> if (IS_ERR(q))
>> return PTR_ERR(q);
>>+
>>+ if (xe_vm_in_compute_mode(vm)) {
>>+ q->compute.context = dma_fence_context_alloc(1);
>>+ spin_lock_init(&q->compute.lock);
>>+
>>+ err = xe_vm_add_compute_exec_queue(vm, q);
>>+ if (XE_IOCTL_DBG(xe, err))
>>+ goto put_exec_queue;
>
>Don't we need the below checks here?
>
>if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
> return -EOPNOTSUPP;
>
>if (XE_IOCTL_DBG(xe, q->width != 1))
> return -EINVAL;
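Something like the rough sketch below, reusing the existing error label (I'm
assuming vm is still safe to dereference at this point, as the patch already
does for xe_vm_in_compute_mode()):

	if (xe_vm_in_compute_mode(vm)) {
		/* checks previously done in exec_queue_set_compute_mode() */
		if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm))) {
			err = -EOPNOTSUPP;
			goto put_exec_queue;
		}

		if (XE_IOCTL_DBG(xe, q->width != 1)) {
			err = -EINVAL;
			goto put_exec_queue;
		}

		q->compute.context = dma_fence_context_alloc(1);
		spin_lock_init(&q->compute.lock);

		err = xe_vm_add_compute_exec_queue(vm, q);
		if (XE_IOCTL_DBG(xe, err))
			goto put_exec_queue;
	}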
>
>>+ }
>> }
>>
>> if (args->extensions) {
>> err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
>> if (XE_IOCTL_DBG(xe, err))
>>- goto put_exec_queue;
>>- }
>>-
>>- if (XE_IOCTL_DBG(xe, q->vm && xe_vm_in_compute_mode(q->vm) !=
>>- !!(q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))) {
>>- err = -EOPNOTSUPP;
>>- goto put_exec_queue;
>>+ goto kill_exec_queue;
>> }
>>
>> q->persistent.xef = xef;
>>@@ -762,14 +732,15 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
>> err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
>> mutex_unlock(&xef->exec_queue.lock);
>> if (err)
>>- goto put_exec_queue;
>>+ goto kill_exec_queue;
>>
>> args->exec_queue_id = id;
>>
>> return 0;
>>
>>-put_exec_queue:
>>+kill_exec_queue:
>> xe_exec_queue_kill(q);
>>+put_exec_queue:
>> xe_exec_queue_put(q);
>
>Not directly related to this patch, but now that I am looking at it, the
>function exec_queue_kill_compute() should probably be renamed to
>xe_vm_remove_compute_exec_queue() and moved to xe_vm.c?
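Roughly like below, purely as a sketch of the suggested move; the body would
stay whatever exec_queue_kill_compute() does today, and the field names here
are assumptions on my side:

	/* xe_vm.c -- sketch only */
	void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
	{
		down_write(&vm->lock);
		/* unlink q from the VM's compute queue list, drop its preempt fence */
		list_del(&q->compute.link);
		--vm->preempt.num_exec_queues;
		if (q->compute.pfence) {
			dma_fence_enable_sw_signaling(q->compute.pfence);
			dma_fence_put(q->compute.pfence);
			q->compute.pfence = NULL;
		}
		up_write(&vm->lock);
	}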
>
>Niranjana
>
>> return err;
>>}
>>diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>>index 347d28442701..f73b9e80b25a 100644
>>--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
>>+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>>@@ -60,12 +60,10 @@ struct xe_exec_queue {
>>#define EXEC_QUEUE_FLAG_PERMANENT BIT(2)
>>/* queue keeps running pending jobs after destroy ioctl */
>>#define EXEC_QUEUE_FLAG_PERSISTENT BIT(3)
>>-/* queue for use with compute VMs */
>>-#define EXEC_QUEUE_FLAG_COMPUTE_MODE BIT(4)
>>/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
>>-#define EXEC_QUEUE_FLAG_VM BIT(5)
>>+#define EXEC_QUEUE_FLAG_VM BIT(4)
>>/* child of VM queue for multi-tile VM jobs */
>>-#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(6)
>>+#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
>>
>> /**
>> * @flags: flags for this exec queue, should statically setup aside from ban
>>--
>>2.34.1
>>