[PATCH 1/3] [RFC]drm: add syncobj timeline support v4

Christian König ckoenig.leichtzumerken at gmail.com
Wed Sep 12 11:05:18 UTC 2018


On 12.09.2018 at 12:20, zhoucm1 wrote:
> [SNIP]
>>> Drop the term semaphore here, better use syncobj.
> This is from the VK_KHR_timeline_semaphore extension description, not my
> invention, I just quoted it. On the kernel side we call it a syncobj; in
> the UMD they still call it a semaphore.

Yeah, but we don't care about closed source UMD names in the kernel, and 
the open source UMD calls it syncobj as well.

>>>
>>>> [SNIP]
>>>> +static void drm_syncobj_find_signal_pt_for_wait_pt(struct drm_syncobj *syncobj,
>>>> +                           struct drm_syncobj_wait_pt *wait_pt)
>>>> +{
>>>
>>> That whole approach still looks horribly complicated to me.
> It's already very close to what you said before.
>
>>>
>>> Especially the separation of signal and wait pt is completely 
>>> unnecessary as far as I can see.
>>> When a wait pt is requested we just need to search for the signal 
>>> point which it will trigger.
> Yeah, I tried this, but when I implemented the CPU wait ioctl on a
> specific point, we needed an advanced wait pt fence; otherwise we would
> still need the old syncobj cb.

Why? I mean you just need to call drm_syncobj_find_fence() and when that 
one returns NULL you use wait_event_*() to wait for a signal point >= 
your wait point to appear and try again.
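
Roughly something like this untested sketch, where
drm_syncobj_lookup_point() is just a made-up placeholder for whatever
helper returns the fence of the signal point covering the requested
value (or NULL if no such signal point exists yet):

/* Untested sketch: instead of a separate wait_pt object, look up the
 * matching signal point and, when it doesn't exist yet, wait for one
 * to show up and retry. drm_syncobj_lookup_point() is a placeholder.
 */
static struct dma_fence *
drm_syncobj_timeline_get_point(struct drm_syncobj *syncobj, u64 point,
                               signed long timeout)
{
        struct dma_fence *fence;
        long r;

        fence = drm_syncobj_lookup_point(syncobj, point);
        if (fence)
                return fence;

        /* No signal point >= point yet, wait for one to appear and retry. */
        r = wait_event_interruptible_timeout(syncobj->syncobj_timeline.wq,
                        syncobj->syncobj_timeline.signal_point >= point,
                        timeout);
        if (r <= 0)
                return NULL;

        return drm_syncobj_lookup_point(syncobj, point);
}

That way there is no separate wait_pt state to keep in sync with the
signal points; the wait side just re-checks after the wait queue fires.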

Regards,
Christian.

>
>
> Thanks,
> David Zhou
>>>
>>> Regards,
>>> Christian.
>>>
>>>> +    struct drm_syncobj_timeline *timeline = &syncobj->syncobj_timeline;
>>>> +    struct drm_syncobj_signal_pt *signal_pt;
>>>> +    int ret;
>>>> +
>>>> +    if (wait_pt->signal_pt_fence) {
>>>> +        return;
>>>> +    } else if ((syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) &&
>>>> +           (wait_pt->value <= timeline->timeline)) {
>>>> +        dma_fence_signal(&wait_pt->base.base);
>>>> +        rb_erase(&wait_pt->node,
>>>> +             &timeline->wait_pt_tree);
>>>> +        RB_CLEAR_NODE(&wait_pt->node);
>>>> +        dma_fence_put(&wait_pt->base.base);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    list_for_each_entry(signal_pt, &timeline->signal_pt_list, list) {
>>>> +        if (wait_pt->value < signal_pt->value)
>>>> +            continue;
>>>> +        if ((syncobj->type == DRM_SYNCOBJ_TYPE_NORMAL) &&
>>>> +            (wait_pt->value != signal_pt->value))
>>>> +            continue;
>>>> +        wait_pt->signal_pt_fence = dma_fence_get(&signal_pt->base->base);
>>>> +        ret = dma_fence_add_callback(wait_pt->signal_pt_fence,
>>>> +                         &wait_pt->wait_cb,
>>>> +                         wait_pt_func);
>>>> +        if (ret == -ENOENT) {
>>>> +            dma_fence_signal(&wait_pt->base.base);
>>>> +            dma_fence_put(wait_pt->signal_pt_fence);
>>>> +            wait_pt->signal_pt_fence = NULL;
>>>> +            rb_erase(&wait_pt->node,
>>>> +                 &timeline->wait_pt_tree);
>>>> +            RB_CLEAR_NODE(&wait_pt->node);
>>>> +            dma_fence_put(&wait_pt->base.base);
>>>> +        } else if (ret < 0) {
>>>> +            dma_fence_put(wait_pt->signal_pt_fence);
>>>> +            DRM_ERROR("add callback error!");
>>>> +        } else {
>>>> +            /* after adding callback, remove from rb tree */
>>>> +            rb_erase(&wait_pt->node,
>>>> +                 &timeline->wait_pt_tree);
>>>> +            RB_CLEAR_NODE(&wait_pt->node);
>>>> +        }
>>>> +        return;
>>>> +    }
>>>> +    /* signaled pt was released */
>>>> +    if (!wait_pt->signal_pt_fence && (wait_pt->value <=
>>>> +                      timeline->signal_point)) {
>>>> +        dma_fence_signal(&wait_pt->base.base);
>>>> +        rb_erase(&wait_pt->node,
>>>> +             &timeline->wait_pt_tree);
>>>> +        RB_CLEAR_NODE(&wait_pt->node);
>>>> +        dma_fence_put(&wait_pt->base.base);
>>>> +    }
>>>>   }
>>>>   -void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
>>>> -                  struct drm_syncobj_cb *cb,
>>>> -                  drm_syncobj_func_t func)
>>>> +static int drm_syncobj_timeline_create_signal_pt(struct drm_syncobj *syncobj,
>>>> +                         struct dma_fence *fence,
>>>> +                         u64 point)
>>>>   {
>>>> +    struct drm_syncobj_signal_pt *signal_pt =
>>>> +        kzalloc(sizeof(struct drm_syncobj_signal_pt), GFP_KERNEL);
>>>> +    struct drm_syncobj_signal_pt *tail_pt;
>>>> +    struct dma_fence **fences;
>>>> +    struct rb_node *node;
>>>> +    struct drm_syncobj_wait_pt *tail_wait_pt = NULL;
>>>> +    int num_fences = 0;
>>>> +    int ret = 0, i;
>>>> +
>>>> +    if (!signal_pt)
>>>> +        return -ENOMEM;
>>>> +    if (syncobj->syncobj_timeline.signal_point >= point) {
>>>> +        DRM_WARN("A later signal is ready!");
>>>> +        goto out;
>>>> +    }
>>>> +    if (!fence)
>>>> +        goto out;
>>>> +
>>>> +    fences = kmalloc_array(sizeof(void *), 2, GFP_KERNEL);
>>>> +    if (!fences)
>>>> +        goto out;
>>>> +    fences[num_fences++] = dma_fence_get(fence);
>>>> +    /* timeline syncobj must take this dependency */
>>>> +    if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
>>>> +        spin_lock(&syncobj->lock);
>>>> +        if (!list_empty(&syncobj->syncobj_timeline.signal_pt_list)) {
>>>> +            tail_pt = list_last_entry(&syncobj->syncobj_timeline.signal_pt_list,
>>>> +                          struct drm_syncobj_signal_pt, list);
>>>> +            fences[num_fences++] = dma_fence_get(&tail_pt->base->base);
>>>> +        }
>>>> +        spin_unlock(&syncobj->lock);
>>>> +    }
>>>> +    signal_pt->base = dma_fence_array_create(num_fences, fences,
>>>> +                         syncobj->syncobj_timeline.timeline_context,
>>>> +                         point, false);
>>>> +    if (!signal_pt->base)
>>>> +        goto fail;
>>>> +
>>>> +    signal_pt->value = point;
>>>>       spin_lock(&syncobj->lock);
>>>> -    drm_syncobj_add_callback_locked(syncobj, cb, func);
>>>> +    syncobj->syncobj_timeline.signal_point = point;
>>>> +    INIT_LIST_HEAD(&signal_pt->list);
>>>> +    list_add_tail(&signal_pt->list, &syncobj->syncobj_timeline.signal_pt_list);
>>>> +
>>>> +    /* check if there is advanced wait */
>>>> +    node = rb_last(&syncobj->syncobj_timeline.wait_pt_tree);
>>>> +    if (node)
>>>> +        tail_wait_pt = rb_entry(node, struct drm_syncobj_wait_pt, node);
>>>> +    if (tail_wait_pt && !tail_wait_pt->signal_pt_fence) {
>>>> +        for (node = rb_first(&syncobj->syncobj_timeline.wait_pt_tree);
>>>> +             node != NULL; node = rb_next(node)) {
>>>> +            struct drm_syncobj_wait_pt *wait_pt =
>>>> +                rb_entry(node, struct drm_syncobj_wait_pt,
>>>> +                     node);
>>>> +
>>>> +            drm_syncobj_find_signal_pt_for_wait_pt(syncobj,
>>>> +                                   wait_pt);
>>>> +        }
>>>> +    }
>>>> +
>>>>       spin_unlock(&syncobj->lock);
>>>> +    wake_up_all(&syncobj->syncobj_timeline.wq);
>>>> +
>>>> +    return 0;
>>>> +fail:
>>>> +    for (i = 0; i < num_fences; i++)
>>>> +        dma_fence_put(fences[i]);
>>>> +    kfree(fences);
>>>> +out:
>>>> +    kfree(signal_pt);
>>>> +    return ret;
>>>>   }
>>>>   -void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
>>>> -                 struct drm_syncobj_cb *cb)
>>>> +static void drm_syncobj_timeline_garbage_collection(struct drm_syncobj *syncobj)
>>>>   {
>>>> +    struct drm_syncobj_timeline *timeline = &syncobj->syncobj_timeline;
>>>> +    struct rb_node *node;
>>>> +    struct drm_syncobj_wait_pt *wait_pt;
>>>> +    struct drm_syncobj_signal_pt *signal_pt, *tmp;
>>>> +
>>>>       spin_lock(&syncobj->lock);
>>>> -    list_del_init(&cb->node);
>>>> +    node = rb_first(&timeline->wait_pt_tree);
>>>> +    while (node) {
>>>> +        wait_pt = rb_entry(node, struct drm_syncobj_wait_pt, node);
>>>> +        if (!dma_fence_is_signaled(&wait_pt->base.base)) {
>>>> +            node = rb_next(node);
>>>> +            continue;
>>>> +        }
>>>> +        rb_erase(&wait_pt->node,
>>>> +             &timeline->wait_pt_tree);
>>>> +        RB_CLEAR_NODE(&wait_pt->node);
>>>> +        /* kfree(wait_pt) is executed by fence put */
>>>> +        dma_fence_put(&wait_pt->base.base);
>>>> +        node = rb_first(&timeline->wait_pt_tree);
>>>> +    }
>>>> +    list_for_each_entry_safe(signal_pt, tmp,
>>>> +                 &timeline->signal_pt_list, list) {
>>>> +        if (dma_fence_is_signaled(&signal_pt->base->base)) {
>>>> +            timeline->timeline = signal_pt->value;
>>>> +            list_del(&signal_pt->list);
>>>> +            dma_fence_put(&signal_pt->base->base);
>>>> +            kfree(signal_pt);
>>>> +        } else {
>>>> +            /*signal_pt is in order in list, from small to big, so
>>>> +             * the later must not be signal either */
>>>> +            break;
>>>> +        }
>>>> +    }
>>>> +
>>>>       spin_unlock(&syncobj->lock);
>>>>   }
>>>> -
>>>>   /**
>>>>    * drm_syncobj_replace_fence - replace fence in a sync object.
>>>>    * @syncobj: Sync object to replace fence in
>>>> @@ -176,28 +368,29 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
>>>>                      u64 point,
>>>>                      struct dma_fence *fence)
>>>>   {
>>>> -    struct dma_fence *old_fence;
>>>> -    struct drm_syncobj_cb *cur, *tmp;
>>>> -
>>>> -    if (fence)
>>>> -        dma_fence_get(fence);
>>>> -
>>>> -    spin_lock(&syncobj->lock);
>>>> -
>>>> -    old_fence = rcu_dereference_protected(syncobj->fence,
>>>> -                                          lockdep_is_held(&syncobj->lock));
>>>> -    rcu_assign_pointer(syncobj->fence, fence);
>>>> -
>>>> -    if (fence != old_fence) {
>>>> -        list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
>>>> -            list_del_init(&cur->node);
>>>> -            cur->func(syncobj, cur);
>>>> +    drm_syncobj_timeline_garbage_collection(syncobj);
>>>> +    if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
>>>> +        if (fence)
>>>> +            drm_syncobj_timeline_create_signal_pt(syncobj, fence,
>>>> +                                  point);
>>>> +        return;
>>>> +    } else if (syncobj->type == DRM_SYNCOBJ_TYPE_NORMAL) {
>>>> +        u64 pt_value;
>>>> +
>>>> +        if (!fence) {
>>>> +            drm_syncobj_timeline_fini(syncobj,
>>>> +                                      &syncobj->syncobj_timeline);
>>>> +            drm_syncobj_timeline_init(syncobj,
>>>> +                                      &syncobj->syncobj_timeline);
>>>> +            return;
>>>>           }
>>>> +        pt_value = syncobj->syncobj_timeline.signal_point +
>>>> +            DRM_SYNCOBJ_NORMAL_POINT;
>>>> +        drm_syncobj_timeline_create_signal_pt(syncobj, fence, pt_value);
>>>> +        return;
>>>> +    } else {
>>>> +        DRM_ERROR("the syncobj type isn't support\n");
>>>>       }
>>>> -
>>>> -    spin_unlock(&syncobj->lock);
>>>> -
>>>> -    dma_fence_put(old_fence);
>>>>   }
>>>>   EXPORT_SYMBOL(drm_syncobj_replace_fence);
>>>>   @@ -220,6 +413,120 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
>>>>       return 0;
>>>>   }
>>>>   +static struct drm_syncobj_wait_pt *
>>>> +drm_syncobj_timeline_lookup_wait_pt_fence(struct drm_syncobj *syncobj,
>>>> +                      u64 point,
>>>> +                      struct dma_fence **fence)
>>>> +{
>>>> +    struct rb_node *node = syncobj->syncobj_timeline.wait_pt_tree.rb_node;
>>>> +    struct drm_syncobj_wait_pt *wait_pt = NULL;
>>>> +
>>>> +    spin_lock(&syncobj->lock);
>>>> +    while(node) {
>>>> +        int result;
>>>> +
>>>> +        wait_pt = rb_entry(node, struct drm_syncobj_wait_pt, node);
>>>> +        result = point - wait_pt->value;
>>>> +        if (result < 0) {
>>>> +            node = node->rb_left;
>>>> +        } else if (result > 0) {
>>>> +            node = node->rb_right;
>>>> +        } else {
>>>> +            if (fence)
>>>> +                *fence = dma_fence_get(&wait_pt->base.base);
>>>> +            break;
>>>> +        }
>>>> +    }
>>>> +    spin_unlock(&syncobj->lock);
>>>> +
>>>> +    return wait_pt;
>>>> +}
>>>> +
>>>> +static struct drm_syncobj_wait_pt *
>>>> +drm_syncobj_timeline_create_wait_pt_and_return_fence(struct drm_syncobj *syncobj,
>>>> +                             u64 point,
>>>> +                             struct dma_fence **fence)
>>>> +{
>>>> +    struct drm_syncobj_wait_pt *wait_pt = kzalloc(sizeof(*wait_pt),
>>>> +                              GFP_KERNEL);
>>>> +    struct drm_syncobj_stub_fence *base;
>>>> +    struct rb_node **new = &(syncobj->syncobj_timeline.wait_pt_tree.rb_node), *parent = NULL;
>>>> +
>>>> +    if (!wait_pt)
>>>> +        return NULL;
>>>> +    base = &wait_pt->base;
>>>> +    spin_lock_init(&base->lock);
>>>> +    dma_fence_init(&base->base,
>>>> +               &drm_syncobj_stub_fence_ops,
>>>> +               &base->lock,
>>>> +               syncobj->syncobj_timeline.timeline_context, point);
>>>> +    wait_pt->value = point;
>>>> +    wait_pt->signal_pt_fence = NULL;
>>>> +
>>>> +    /* wait pt must be in an order, so that we can easily lookup and signal
>>>> +     * it */
>>>> +    spin_lock(&syncobj->lock);
>>>> +    while(*new) {
>>>> +        struct drm_syncobj_wait_pt *this =
>>>> +            rb_entry(*new, struct drm_syncobj_wait_pt, node);
>>>> +        int result = wait_pt->value - this->value;
>>>> +
>>>> +        parent = *new;
>>>> +        if (result < 0)
>>>> +            new = &((*new)->rb_left);
>>>> +        else if (result > 0)
>>>> +            new = &((*new)->rb_right);
>>>> +        else
>>>> +            goto exist;
>>>> +    }
>>>> +    if (fence)
>>>> +        *fence = dma_fence_get(&wait_pt->base.base);
>>>> +    rb_link_node(&wait_pt->node, parent, new);
>>>> +    rb_insert_color(&wait_pt->node, &syncobj->syncobj_timeline.wait_pt_tree);
>>>> +    spin_unlock(&syncobj->lock);
>>>> +    return wait_pt;
>>>> +exist:
>>>> +    spin_unlock(&syncobj->lock);
>>>> +    dma_fence_put(&wait_pt->base.base);
>>>> +    wait_pt = drm_syncobj_timeline_lookup_wait_pt_fence(syncobj, point,
>>>> +                                fence);
>>>> +    return wait_pt;
>>>> +}
>>>> +
>>>> +static struct dma_fence *
>>>> +drm_syncobj_timeline_point_get(struct drm_syncobj *syncobj, u64 point, u64 flags)
>>>> +{
>>>> +    struct drm_syncobj_wait_pt *wait_pt;
>>>> +    struct dma_fence *fence = NULL;
>>>> +
>>>> +    /* check if the wait pt exists */
>>>> +    wait_pt = drm_syncobj_timeline_lookup_wait_pt_fence(syncobj, point,
>>>> +                                &fence);
>>>> +    if (!fence) {
>>>> +        /* This is a new wait pt, so create it */
>>>> +        wait_pt = drm_syncobj_timeline_create_wait_pt_and_return_fence(syncobj, point,
>>>> +                                  &fence);
>>>> +        if (!fence)
>>>> +            return NULL;
>>>> +    }
>>>> +    if (wait_pt) {
>>>> +        int ret = 0;
>>>> +
>>>> +        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
>>>> +            ret = wait_event_interruptible_timeout(syncobj->syncobj_timeline.wq,
>>>> +                                   wait_pt->value <= syncobj->syncobj_timeline.signal_point,
>>>> +                                   msecs_to_jiffies(10000)); /* wait 10s */
>>>> +            if (ret <= 0)
>>>> +                return NULL;
>>>> +        }
>>>> +        spin_lock(&syncobj->lock);
>>>> +        drm_syncobj_find_signal_pt_for_wait_pt(syncobj, wait_pt);
>>>> +        spin_unlock(&syncobj->lock);
>>>> +        return fence;
>>>> +    }
>>>> +    return NULL;
>>>> +}
>>>> +
>>>>   /**
>>>>    * drm_syncobj_find_fence - lookup and reference the fence in a sync object
>>>>    * @file_private: drm file private pointer
>>>> @@ -234,20 +541,46 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
>>>>    * contains a reference to the fence, which must be released by calling
>>>>    * dma_fence_put().
>>>>    */
>>>> -int drm_syncobj_find_fence(struct drm_file *file_private,
>>>> -               u32 handle, u64 point,
>>>> -               struct dma_fence **fence)
>>>> +int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point,
>>>> +                 u64 flags, struct dma_fence **fence)
>>>>   {
>>>> -    struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
>>>>       int ret = 0;
>>>>         if (!syncobj)
>>>>           return -ENOENT;
>>>>   -    *fence = drm_syncobj_fence_get(syncobj);
>>>> +    drm_syncobj_timeline_garbage_collection(syncobj);
>>>> +    if (syncobj->type == DRM_SYNCOBJ_TYPE_NORMAL) {
>>>> +        /*NORMAL syncobj always wait on last pt */
>>>> +        u64 tail_pt_value = syncobj->syncobj_timeline.signal_point;
>>>> +
>>>> +        if (tail_pt_value == 0)
>>>> +            tail_pt_value += DRM_SYNCOBJ_NORMAL_POINT;
>>>> +        /* NORMAL syncobj doesn't care point value */
>>>> +        WARN_ON(point != 0);
>>>> +        *fence = drm_syncobj_timeline_point_get(syncobj, tail_pt_value,
>>>> +                            flags);
>>>> +    } else if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
>>>> +        *fence = drm_syncobj_timeline_point_get(syncobj, point,
>>>> +                            flags);
>>>> +    } else {
>>>> +        DRM_ERROR("Don't support this type syncobj\n");
>>>> +        *fence = NULL;
>>>> +    }
>>>>       if (!*fence) {
>>>>           ret = -EINVAL;
>>>>       }
>>>> +    return ret;
>>>> +}
>>>> +EXPORT_SYMBOL(drm_syncobj_search_fence);
>>>> +int drm_syncobj_find_fence(struct drm_file *file_private,
>>>> +               u32 handle, u64 point,
>>>> +               struct dma_fence **fence) {
>>>> +    struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
>>>> +
>>>> +    int ret = drm_syncobj_search_fence(syncobj, point,
>>>> +                    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
>>>> +                    fence);
>>>>       drm_syncobj_put(syncobj);
>>>>       return ret;
>>>>   }
>>>> @@ -264,7 +597,7 @@ void drm_syncobj_free(struct kref *kref)
>>>>       struct drm_syncobj *syncobj = container_of(kref,
>>>>                              struct drm_syncobj,
>>>>                              refcount);
>>>> -    drm_syncobj_replace_fence(syncobj, 0, NULL);
>>>> +    drm_syncobj_timeline_fini(syncobj, &syncobj->syncobj_timeline);
>>>>       kfree(syncobj);
>>>>   }
>>>>   EXPORT_SYMBOL(drm_syncobj_free);
>>>> @@ -292,8 +625,12 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
>>>>           return -ENOMEM;
>>>>         kref_init(&syncobj->refcount);
>>>> -    INIT_LIST_HEAD(&syncobj->cb_list);
>>>>       spin_lock_init(&syncobj->lock);
>>>> +    if (flags & DRM_SYNCOBJ_CREATE_TYPE_TIMELINE)
>>>> +        syncobj->type = DRM_SYNCOBJ_TYPE_TIMELINE;
>>>> +    else
>>>> +        syncobj->type = DRM_SYNCOBJ_TYPE_NORMAL;
>>>> +    drm_syncobj_timeline_init(syncobj, &syncobj->syncobj_timeline);
>>>>         if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
>>>>           ret = drm_syncobj_assign_null_handle(syncobj);
>>>> @@ -651,7 +988,6 @@ struct syncobj_wait_entry {
>>>>       struct task_struct *task;
>>>>       struct dma_fence *fence;
>>>>       struct dma_fence_cb fence_cb;
>>>> -    struct drm_syncobj_cb syncobj_cb;
>>>>   };
>>>>     static void syncobj_wait_fence_func(struct dma_fence *fence,
>>>> @@ -663,18 +999,6 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
>>>>       wake_up_process(wait->task);
>>>>   }
>>>>   -static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
>>>> -                      struct drm_syncobj_cb *cb)
>>>> -{
>>>> -    struct syncobj_wait_entry *wait =
>>>> -        container_of(cb, struct syncobj_wait_entry, syncobj_cb);
>>>> -
>>>> -    /* This happens inside the syncobj lock */
>>>> -    wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
>>>> -                               lockdep_is_held(&syncobj->lock)));
>>>> -    wake_up_process(wait->task);
>>>> -}
>>>> -
>>>>   static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
>>>>                             uint32_t count,
>>>>                             uint32_t flags,
>>>> @@ -698,14 +1022,11 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
>>>>       signaled_count = 0;
>>>>       for (i = 0; i < count; ++i) {
>>>>           entries[i].task = current;
>>>> -        entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
>>>> +        ret = drm_syncobj_search_fence(syncobjs[i], 0, 0,
>>>> +                           &entries[i].fence);
>>>>           if (!entries[i].fence) {
>>>> -            if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
>>>> -                continue;
>>>> -            } else {
>>>> -                ret = -EINVAL;
>>>> -                goto cleanup_entries;
>>>> -            }
>>>> +            ret = -EINVAL;
>>>> +            goto cleanup_entries;
>>>>           }
>>>>             if (dma_fence_is_signaled(entries[i].fence)) {
>>>> @@ -733,15 +1054,6 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
>>>>        * fallthough and try a 0 timeout wait!
>>>>        */
>>>>   -    if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
>>>> -        for (i = 0; i < count; ++i) {
>>>> -            drm_syncobj_fence_get_or_add_callback(syncobjs[i],
>>>> -                                  &entries[i].fence,
>>>> -                                  &entries[i].syncobj_cb,
>>>> -                                  syncobj_wait_syncobj_func);
>>>> -        }
>>>> -    }
>>>> -
>>>>       do {
>>>>           set_current_state(TASK_INTERRUPTIBLE);
>>>>   @@ -789,13 +1101,10 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
>>>>     cleanup_entries:
>>>>       for (i = 0; i < count; ++i) {
>>>> -        if (entries[i].syncobj_cb.func)
>>>> -            drm_syncobj_remove_callback(syncobjs[i],
>>>> -                            &entries[i].syncobj_cb);
>>>> +        dma_fence_put(entries[i].fence);
>>>>           if (entries[i].fence_cb.func)
>>>>               dma_fence_remove_callback(entries[i].fence,
>>>>                             &entries[i].fence_cb);
>>>> -        dma_fence_put(entries[i].fence);
>>>>       }
>>>>       kfree(entries);
>>>>   @@ -970,12 +1279,21 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
>>>>       if (ret < 0)
>>>>           return ret;
>>>>   -    for (i = 0; i < args->count_handles; i++)
>>>> -        drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
>>>> -
>>>> +    for (i = 0; i < args->count_handles; i++) {
>>>> +        if (syncobjs[i]->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
>>>> +            DRM_ERROR("timeline syncobj cannot reset!\n");
>>>> +            ret = -EINVAL;
>>>> +            goto out;
>>>> +        }
>>>> +        drm_syncobj_timeline_fini(syncobjs[i],
>>>> +                                  &syncobjs[i]->syncobj_timeline);
>>>> +        drm_syncobj_timeline_init(syncobjs[i],
>>>> +                                  &syncobjs[i]->syncobj_timeline);
>>>> +    }
>>>> +out:
>>>>       drm_syncobj_array_free(syncobjs, args->count_handles);
>>>>   -    return 0;
>>>> +    return ret;
>>>>   }
>>>>     int
>>>> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>>>> index 0a8d2d64f380..579e91a5858b 100644
>>>> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>>>> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>>>> @@ -2137,7 +2137,9 @@ await_fence_array(struct i915_execbuffer *eb,
>>>>           if (!(flags & I915_EXEC_FENCE_WAIT))
>>>>               continue;
>>>>   -        fence = drm_syncobj_fence_get(syncobj);
>>>> +        drm_syncobj_search_fence(syncobj, 0,
>>>> +                     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
>>>> +                     &fence);
>>>>           if (!fence)
>>>>               return -EINVAL;
>>>>   diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
>>>> index 425432b85a87..657c97dc25ec 100644
>>>> --- a/include/drm/drm_syncobj.h
>>>> +++ b/include/drm/drm_syncobj.h
>>>> @@ -30,6 +30,25 @@
>>>>     struct drm_syncobj_cb;
>>>>   +enum drm_syncobj_type {
>>>> +    DRM_SYNCOBJ_TYPE_NORMAL,
>>>> +    DRM_SYNCOBJ_TYPE_TIMELINE
>>>> +};
>>>> +
>>>> +struct drm_syncobj_timeline {
>>>> +    wait_queue_head_t    wq;
>>>> +    u64 timeline_context;
>>>> +    /**
>>>> +     * @timeline: syncobj timeline
>>>> +     */
>>>> +    u64 timeline;
>>>> +    u64 signal_point;
>>>> +
>>>> +
>>>> +    struct rb_root wait_pt_tree;
>>>> +    struct list_head signal_pt_list;
>>>> +};
>>>> +
>>>>   /**
>>>>    * struct drm_syncobj - sync object.
>>>>    *
>>>> @@ -41,19 +60,16 @@ struct drm_syncobj {
>>>>        */
>>>>       struct kref refcount;
>>>>       /**
>>>> -     * @fence:
>>>> -     * NULL or a pointer to the fence bound to this object.
>>>> -     *
>>>> -     * This field should not be used directly. Use drm_syncobj_fence_get()
>>>> -     * and drm_syncobj_replace_fence() instead.
>>>> +     * @type: indicate syncobj type
>>>>        */
>>>> -    struct dma_fence __rcu *fence;
>>>> +    enum drm_syncobj_type type;
>>>>       /**
>>>> -     * @cb_list: List of callbacks to call when the &fence gets replaced.
>>>> +     * @syncobj_timeline: timeline
>>>>        */
>>>> -    struct list_head cb_list;
>>>> +    struct drm_syncobj_timeline syncobj_timeline;
>>>> +
>>>>       /**
>>>> -     * @lock: Protects &cb_list and write-locks &fence.
>>>> +     * @lock: Protects timeline list and write-locks &fence.
>>>>        */
>>>>       spinlock_t lock;
>>>>       /**
>>>> @@ -62,25 +78,6 @@ struct drm_syncobj {
>>>>       struct file *file;
>>>>   };
>>>>   -typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj,
>>>> -                   struct drm_syncobj_cb *cb);
>>>> -
>>>> -/**
>>>> - * struct drm_syncobj_cb - callback for drm_syncobj_add_callback
>>>> - * @node: used by drm_syncob_add_callback to append this struct to
>>>> - *      &drm_syncobj.cb_list
>>>> - * @func: drm_syncobj_func_t to call
>>>> - *
>>>> - * This struct will be initialized by drm_syncobj_add_callback, additional
>>>> - * data can be passed along by embedding drm_syncobj_cb in another struct.
>>>> - * The callback will get called the next time drm_syncobj_replace_fence is
>>>> - * called.
>>>> - */
>>>> -struct drm_syncobj_cb {
>>>> -    struct list_head node;
>>>> -    drm_syncobj_func_t func;
>>>> -};
>>>> -
>>>>   void drm_syncobj_free(struct kref *kref);
>>>>     /**
>>>> @@ -106,29 +103,6 @@ drm_syncobj_put(struct drm_syncobj *obj)
>>>>       kref_put(&obj->refcount, drm_syncobj_free);
>>>>   }
>>>>   -/**
>>>> - * drm_syncobj_fence_get - get a reference to a fence in a sync object
>>>> - * @syncobj: sync object.
>>>> - *
>>>> - * This acquires additional reference to &drm_syncobj.fence contained in @obj,
>>>> - * if not NULL. It is illegal to call this without already holding a reference.
>>>> - * No locks required.
>>>> - *
>>>> - * Returns:
>>>> - * Either the fence of @obj or NULL if there's none.
>>>> - */
>>>> -static inline struct dma_fence *
>>>> -drm_syncobj_fence_get(struct drm_syncobj *syncobj)
>>>> -{
>>>> -    struct dma_fence *fence;
>>>> -
>>>> -    rcu_read_lock();
>>>> -    fence = dma_fence_get_rcu_safe(&syncobj->fence);
>>>> -    rcu_read_unlock();
>>>> -
>>>> -    return fence;
>>>> -}
>>>> -
>>>>   struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
>>>>                        u32 handle);
>>>>   void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
>>>> @@ -142,5 +116,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
>>>>   int drm_syncobj_get_handle(struct drm_file *file_private,
>>>>                  struct drm_syncobj *syncobj, u32 *handle);
>>>>   int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd);
>>>> +int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point,
>>>> +                 u64 flags, struct dma_fence **fence);
>>>>     #endif
>>>> diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
>>>> index 300f336633f2..cebdb2541eb7 100644
>>>> --- a/include/uapi/drm/drm.h
>>>> +++ b/include/uapi/drm/drm.h
>>>> @@ -717,6 +717,7 @@ struct drm_prime_handle {
>>>>   struct drm_syncobj_create {
>>>>       __u32 handle;
>>>>   #define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
>>>> +#define DRM_SYNCOBJ_CREATE_TYPE_TIMELINE (1 << 1)
>>>>       __u32 flags;
>>>>   };
>>>
>>