[Intel-xe] [RFC PATCH 3/7] drm/xe: Allow num_binds == 0 in VM bind IOCTL
Matthew Brost
matthew.brost at intel.com
Thu Dec 7 05:57:25 UTC 2023
The idea is that out-syncs can signal once all previous operations
on the bind queue are complete. An example use case would be a
simple implementation of vkQueueWaitIdle.
v2: s/vkQueueWaitForIdle/vkQueueWaitIdle
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/xe/xe_sync.c | 4 ++++
drivers/gpu/drm/xe/xe_sync.h | 1 +
drivers/gpu/drm/xe/xe_vm.c | 36 ++++++++++++++++++++++--------------
3 files changed, 27 insertions(+), 14 deletions(-)
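A rough userspace sketch of the intended use (not part of this patch):
submit a VM bind with num_binds == 0 and a single syncobj out-sync, then
wait on it. Struct and field names follow the xe uAPI header as of this
series and may change; the helper name and header path are made up for
illustration.

static int bind_queue_wait_idle(int fd, uint32_t vm_id,
				uint32_t exec_queue_id)
{
	/* Assumes a local copy of the xe uAPI header plus libdrm:
	 *   #include <stdint.h>
	 *   #include <xf86drm.h>
	 *   #include "xe_drm.h"
	 */
	struct drm_xe_sync sync = {
		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	/* out-sync */
	};
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.exec_queue_id = exec_queue_id,
		.num_binds = 0,				/* no bind ops, syncs only */
		.num_syncs = 1,
		.syncs = (uintptr_t)&sync,
	};
	int ret;

	ret = drmSyncobjCreate(fd, 0, &sync.handle);
	if (ret)
		return ret;

	/* Out-sync signals once all previous ops on the bind queue finish. */
	ret = drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
	if (!ret)
		ret = drmSyncobjWait(fd, &sync.handle, 1, INT64_MAX, 0, NULL);

	drmSyncobjDestroy(fd, sync.handle);
	return ret;
}

Note that with this patch a DRM_XE_SYNC_TYPE_USER_FENCE out-sync is
rejected with -EOPNOTSUPP when num_binds == 0, so a syncobj is used here.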
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 2a3f508722fc..d0f118223fa2 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -104,6 +104,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
int err;
bool exec = flags & SYNC_PARSE_FLAG_EXEC;
bool in_lr_mode = flags & SYNC_PARSE_FLAG_LR_MODE;
+ bool disallow_user_fence = flags & SYNC_PARSE_FLAG_DISALLOW_USER_FENCE;
bool signal;
if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
@@ -164,6 +165,9 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
break;
case DRM_XE_SYNC_TYPE_USER_FENCE:
+ if (XE_IOCTL_DBG(xe, disallow_user_fence))
+ return -EOPNOTSUPP;
+
if (XE_IOCTL_DBG(xe, !signal))
return -EOPNOTSUPP;
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 1b748cec4678..45f4371e94b9 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -14,6 +14,7 @@ struct xe_sched_job;
#define SYNC_PARSE_FLAG_EXEC BIT(0)
#define SYNC_PARSE_FLAG_LR_MODE BIT(1)
+#define SYNC_PARSE_FLAG_DISALLOW_USER_FENCE BIT(2)
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 42077e3db36a..f6de0584ea91 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2834,7 +2834,6 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->extensions) ||
- XE_IOCTL_DBG(xe, !args->num_binds) ||
XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
return -EINVAL;
@@ -2987,7 +2986,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto put_exec_queue;
}
- if (XE_IOCTL_DBG(xe, async !=
+ if (XE_IOCTL_DBG(xe, args->num_binds && async !=
!!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
err = -EINVAL;
goto put_exec_queue;
@@ -3001,7 +3000,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (!args->exec_queue_id) {
- if (XE_IOCTL_DBG(xe, async !=
+ if (XE_IOCTL_DBG(xe, args->num_binds && async !=
!!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
err = -EINVAL;
goto put_vm;
@@ -3028,16 +3027,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
- bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
- if (!bos) {
- err = -ENOMEM;
- goto release_vm_lock;
- }
+ if (args->num_binds) {
+ bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
+ if (!bos) {
+ err = -ENOMEM;
+ goto release_vm_lock;
+ }
- ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
- if (!ops) {
- err = -ENOMEM;
- goto release_vm_lock;
+ ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
+ if (!ops) {
+ err = -ENOMEM;
+ goto release_vm_lock;
+ }
}
for (i = 0; i < args->num_binds; ++i) {
@@ -3107,12 +3108,19 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
&syncs_user[num_syncs],
- xe_vm_in_lr_mode(vm) ?
- SYNC_PARSE_FLAG_LR_MODE : 0);
+ (xe_vm_in_lr_mode(vm) ?
+ SYNC_PARSE_FLAG_LR_MODE : 0) |
+ (!args->num_binds ?
+ SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
if (err)
goto free_syncs;
}
+ if (!args->num_binds) {
+ err = -ENODATA;
+ goto free_syncs;
+ }
+
for (i = 0; i < args->num_binds; ++i) {
u64 range = bind_ops[i].range;
u64 addr = bind_ops[i].addr;
--
2.34.1