[PATCH] drm/syncobj: Allow wait for submit and signal behavior (v2)
Chris Wilson
chris at chris-wilson.co.uk
Thu Aug 10 11:00:09 UTC 2017
Quoting Jason Ekstrand (2017-08-10 00:53:00)
> On Wed, Aug 9, 2017 at 3:41 PM, Chris Wilson <chris at chris-wilson.co.uk> wrote:
> However, this installs the proxy into syncobj->fence with the result
> that any concurrent wait also becomes a WAIT_FOR_SUBMIT. The behaviour
> of drm_syncobj is then quite inconsistent: sometimes it will wait for a
> future fence, sometimes it will report an error.
>
>
> Yeah, that's not good. I thought about a variety of solutions to try and
> re-use more core dma_fence code. Ultimately I chose the current one because it
> takes a snapshot of the syncobjs and then, from that point forward, it's
> consistent with its snapshot. Nothing I was able to come up with based on core
> dma_fence wait routines does that.
So if we choose to keep the proxy local to the fence-array and not replace
syncobj->fence, we can still reduce this to a plain dma-fence-array +
wait.
Pertinent details:
+/*
+ * syncobj_notify - fire and detach all callbacks registered on a syncobj.
+ * @syncobj: sync object whose cb_list is drained
+ * @fence: newly installed fence, or NULL when the syncobj is being freed
+ *
+ * Runs every pending drm_syncobj_cb under syncobj->lock, then resets
+ * cb_list so each callback fires at most once.  Callbacks are expected
+ * to free their own node (see future_fence_cb), which is why the _safe
+ * iterator is used.
+ */
+static void syncobj_notify(struct drm_syncobj *syncobj, struct dma_fence *fence)
+{
+ struct drm_syncobj_cb *cb, *cn;
+ unsigned long flags;
+
+ /* This is just an opencoded waitqueue; FIXME! */
+ spin_lock_irqsave(&syncobj->lock, flags);
+ list_for_each_entry_safe(cb, cn, &syncobj->cb_list, link)
+ cb->func(cb, fence);
+ INIT_LIST_HEAD(&syncobj->cb_list); /* detach everything in one go */
+ spin_unlock_irqrestore(&syncobj->lock, flags);
+}
+
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
@@ -89,7 +109,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
if (fence)
dma_fence_get(fence);
+
old_fence = xchg(&syncobj->fence, fence);
+ if (!old_fence && fence)
+ syncobj_notify(syncobj, fence);
dma_fence_put(old_fence);
}
@@ -124,6 +147,9 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
+
+ syncobj_notify(syncobj, NULL);
+
dma_fence_put(syncobj->fence);
kfree(syncobj);
}
@@ -140,6 +166,8 @@ static int drm_syncobj_create(struct drm_file *file_private,
return -ENOMEM;
kref_init(&syncobj->refcount);
+ spin_lock_init(&syncobj->lock);
+ INIT_LIST_HEAD(&syncobj->cb_list);
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
struct future_fence {
+ struct drm_syncobj_cb base; /* link + func; lives on syncobj->cb_list */
+ struct dma_fence **slot; /* this waiter's entry in its fences[] array */
+};
+
+/*
+ * future_fence_cb - resolve a proxy fence once the syncobj is submitted.
+ * @cb: embedded drm_syncobj_cb (container of the future_fence)
+ * @fence: real fence installed in the syncobj, or NULL when the syncobj
+ *         is being destroyed (syncobj_notify(NULL) from drm_syncobj_free)
+ *
+ * On submission the proxy in *slot is replaced by the real fence; on
+ * destruction the proxy is signalled instead so the waiter does not hang
+ * forever.  The node is freed here — it fires at most once because
+ * syncobj_notify resets the list.
+ *
+ * NOTE(review): *slot must still point at valid storage when this fires;
+ * verify the waiter's fences[] array outlives the registration.
+ */
+static void future_fence_cb(struct drm_syncobj_cb *cb, struct dma_fence *fence)
+{
+ struct future_fence *ff = container_of(cb, typeof(*ff), base);
+
+ if (fence)
+ dma_fence_replace_proxy(ff->slot, fence);
+ else
+ dma_fence_signal(*ff->slot);
+
+ kfree(ff);
+}
+
+/*
+ * drm_syncobj_wait_ioctl - wait for any/all of a set of syncobjs to signal.
+ * @dev: drm device (must advertise DRIVER_SYNCOBJ)
+ * @data: struct drm_syncobj_wait from userspace
+ * @file_private: drm file owning the syncobj handles
+ *
+ * Snapshots the fence of each handle; with WAIT_FOR_SUBMIT, a proxy fence
+ * is created for a not-yet-submitted syncobj and later swapped for the
+ * real fence by future_fence_cb.  The snapshot is wrapped in a
+ * dma_fence_array and waited on, so a concurrent replace_fence cannot
+ * change what an in-progress wait observes.
+ *
+ * Returns 0 on success, -ETIME on timeout, or a negative error code.
+ */
+int
+drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_wait *args = data;
+ struct dma_fence_array *array;
+ struct dma_fence **fences;
+ unsigned int count, i;
+ long timeout;
+ u32 *handles;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(*fences) < sizeof(*handles));
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return -EINVAL;
+
+ count = args->count_handles;
+ if (!count)
+ return -EINVAL;
+
+ /*
+ * Single allocation for both arrays: the u32 handles are parked in
+ * the tail of the fence-pointer array and consumed front-to-back
+ * before the corresponding fences[i] slot is written (sizes checked
+ * by the BUILD_BUG_ON above).
+ */
+ fences = kmalloc_array(count,
+ sizeof(struct dma_fence *),
+ __GFP_NOWARN | GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ handles = (void *)fences + count * (sizeof(*fences) - sizeof(*handles));
+ if (copy_from_user(handles,
+ u64_to_user_ptr(args->handles),
+ sizeof(u32) * count)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct future_fence *cb = NULL;
+ struct dma_fence *proxy = NULL;
+ struct drm_syncobj *s;
+
+ ret = -ENOENT;
+ s = drm_syncobj_find(file_private, handles[i]);
+ if (!s) {
+ count = i;
+ goto err_fences;
+ }
+
+ /*
+ * Allocate before taking s->lock: kmalloc(GFP_KERNEL) and the
+ * proxy allocation may sleep and must not run inside
+ * spin_lock_irq.  Unused spares are released after unlock.
+ */
+ if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+ proxy = dma_fence_create_proxy();
+ }
+
+ spin_lock_irq(&s->lock);
+ if (s->fence) {
+ /* Already submitted: snapshot the current fence. */
+ fences[i] = dma_fence_get(s->fence);
+ ret = 0;
+ } else if (cb && proxy) {
+ /*
+ * Not submitted yet: install the proxy and register a
+ * callback to swap in the real fence on submission.
+ * NOTE(review): cb->slot points into fences[], whose
+ * storage is owned by the fence-array once created —
+ * confirm the callback cannot fire after that storage
+ * is released.
+ */
+ fences[i] = proxy;
+ cb->slot = &fences[i];
+ cb->base.func = future_fence_cb;
+ list_add(&cb->base.link, &s->cb_list);
+ cb = NULL; /* ownership moved to cb_list */
+ proxy = NULL; /* ownership moved to fences[i] */
+ ret = 0;
+ } else if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ ret = -ENOMEM;
+ } else {
+ /* No fence yet and not allowed to wait for submit. */
+ ret = -EINVAL;
+ }
+ spin_unlock_irq(&s->lock);
+
+ /* Drop unused spares; kfree(NULL)/dma_fence_put(NULL) are no-ops. */
+ kfree(cb);
+ dma_fence_put(proxy);
+
+ if (ret) {
+ count = i;
+ goto err_fences;
+ }
+ }
+
+ array = dma_fence_array_create(count, fences, 0, 0,
+ !(args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL));
+ if (!array) {
+ ret = -ENOMEM;
+ goto err_fences;
+ }
+
+ /* The array now owns the fences[] storage and its references. */
+ timeout = drm_timeout_abs_to_jiffies(args->timeout_nsec);
+ timeout = dma_fence_wait_timeout(&array->base, true, timeout);
+ args->first_signaled = array->first_signaled;
+ dma_fence_put(&array->base);
+
+ return timeout < 0 ? timeout : timeout == 0 ? -ETIME : 0; /* Gah. */
+
+err_fences:
+ for (i = 0; i < count; i++)
+ dma_fence_put(fences[i]);
+err:
+ kfree(fences);
+ return ret;
+}
More information about the dri-devel mailing list