[RFC] dma-buf: Rename struct fence to dma_fence

Sean Paul seanpaul at google.com
Wed Jul 13 15:07:47 UTC 2016


On Wed, Jul 13, 2016 at 7:46 AM, Daniel Vetter <daniel at ffwll.ch> wrote:
> On Wed, Jul 13, 2016 at 03:10:45PM +0100, Chris Wilson wrote:
>> I plan to usurp the short name of struct fence for a core kernel struct,
>> and so I need to rename the specialised fence/timeline for DMA
>> operations to make room.
>>
>> As an indication of the scale of the flag day:
>>
>>  91 files changed, 904 insertions(+), 880 deletions(-)
>>
>> with the greatest victim being amdgpu.
>>
>> Just the highlights shown below.
>
> +1 on dma_fence, for more consistency with dma_buf and everything else
> dma_*. I think if we land this right before/after 4.8-rc1 through the drm
> tree it should be minimally invasive. Worst case we'll have a fun merge
> between drm.git and drm-intel.git (since drm-intel-next-queued doesn't get
> closed while the merge window is open).
>
> Adding lots more people to gather their opinion.


LGTM. Can you post the coccinelle script (assuming that's what you
used) along with the patch, so we can repro locally and confirm
everything lines up?

Sean


> -Daniel
>
>> -Chris
>>
>> ---
>>  drivers/base/Kconfig                            |   6 +-
>>  drivers/dma-buf/Makefile                        |   2 +-
>>  drivers/dma-buf/dma-buf.c                       |  28 +-
>>  drivers/dma-buf/dma-fence.c                     | 535 ++++++++++++++++++++++++
>>  drivers/dma-buf/fence.c                         | 532 -----------------------
>>  drivers/dma-buf/reservation.c                   |  90 ++--
>>  drivers/dma-buf/seqno-fence.c                   |  18 +-
>>  drivers/dma-buf/sw_sync.c                       |  44 +-
>>  drivers/dma-buf/sync_debug.c                    |   9 +-
>>  drivers/dma-buf/sync_debug.h                    |  13 +-
>>  drivers/dma-buf/sync_file.c                     |  30 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu.h             |  56 +--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c   |   8 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c          |  18 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c         |  22 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c      |   2 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c     |  16 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c       |  50 +--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c          |  10 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c         |  18 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c      |   2 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h      |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c          |  24 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c        |  56 +--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c        |  12 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h       |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c         |   6 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c         |  18 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h         |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c         |  22 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h         |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c          |  56 +--
>>  drivers/gpu/drm/amd/amdgpu/cik_sdma.c           |   8 +-
>>  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c           |   8 +-
>>  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c           |  16 +-
>>  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c          |   8 +-
>>  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c          |   8 +-
>>  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c           |   6 +-
>>  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c           |   6 +-
>>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c           |   6 +-
>>  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h |   4 +-
>>  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   |  42 +-
>>  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h   |  24 +-
>>  drivers/gpu/drm/amd/scheduler/sched_fence.c     |  22 +-
>>  drivers/gpu/drm/drm_atomic_helper.c             |   6 +-
>>  drivers/gpu/drm/etnaviv/etnaviv_gem.c           |   6 +-
>>  drivers/gpu/drm/etnaviv/etnaviv_gpu.c           |  46 +-
>>  drivers/gpu/drm/etnaviv/etnaviv_gpu.h           |   4 +-
>>  drivers/gpu/drm/imx/ipuv3-crtc.c                |  12 +-
>>  drivers/gpu/drm/msm/msm_drv.h                   |   2 +-
>>  drivers/gpu/drm/msm/msm_fence.c                 |  30 +-
>>  drivers/gpu/drm/msm/msm_fence.h                 |   2 +-
>>  drivers/gpu/drm/msm/msm_gem.c                   |  14 +-
>>  drivers/gpu/drm/msm/msm_gem.h                   |   2 +-
>>  drivers/gpu/drm/msm/msm_gem_submit.c            |   2 +-
>>  drivers/gpu/drm/msm/msm_gpu.c                   |   2 +-
>>  drivers/gpu/drm/nouveau/nouveau_bo.c            |   6 +-
>>  drivers/gpu/drm/nouveau/nouveau_fence.c         |  68 +--
>>  drivers/gpu/drm/nouveau/nouveau_fence.h         |   6 +-
>>  drivers/gpu/drm/nouveau/nouveau_gem.c           |   2 +-
>>  drivers/gpu/drm/nouveau/nv04_fence.c            |   2 +-
>>  drivers/gpu/drm/nouveau/nv10_fence.c            |   2 +-
>>  drivers/gpu/drm/nouveau/nv17_fence.c            |   2 +-
>>  drivers/gpu/drm/nouveau/nv50_fence.c            |   2 +-
>>  drivers/gpu/drm/nouveau/nv84_fence.c            |   2 +-
>>  drivers/gpu/drm/qxl/qxl_drv.h                   |   4 +-
>>  drivers/gpu/drm/qxl/qxl_release.c               |  27 +-
>>  drivers/gpu/drm/radeon/radeon.h                 |  10 +-
>>  drivers/gpu/drm/radeon/radeon_device.c          |   2 +-
>>  drivers/gpu/drm/radeon/radeon_display.c         |   8 +-
>>  drivers/gpu/drm/radeon/radeon_fence.c           |  50 +--
>>  drivers/gpu/drm/radeon/radeon_sync.c            |   6 +-
>>  drivers/gpu/drm/radeon/radeon_uvd.c             |   2 +-
>>  drivers/gpu/drm/ttm/ttm_bo.c                    |  24 +-
>>  drivers/gpu/drm/ttm/ttm_bo_util.c               |   2 +-
>>  drivers/gpu/drm/ttm/ttm_execbuf_util.c          |   3 +-
>>  drivers/gpu/drm/virtio/virtgpu_display.c        |   2 +-
>>  drivers/gpu/drm/virtio/virtgpu_drv.h            |   2 +-
>>  drivers/gpu/drm/virtio/virtgpu_fence.c          |  22 +-
>>  drivers/gpu/drm/virtio/virtgpu_ioctl.c          |  12 +-
>>  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c           |  40 +-
>>  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h           |   8 +-
>>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c        |   2 +-
>>  include/drm/drm_crtc.h                          |   4 +-
>>  include/drm/ttm/ttm_bo_driver.h                 |   2 +-
>>  include/drm/ttm/ttm_execbuf_util.h              |   2 +-
>>  include/linux/dma-buf.h                         |   4 +-
>>  include/linux/dma-fence.h                       | 392 +++++++++++++++++
>>  include/linux/fence.h                           | 379 -----------------
>>  include/linux/reservation.h                     |  28 +-
>>  include/linux/seqno-fence.h                     |  16 +-
>>  include/linux/sync_file.h                       |   8 +-
>>  include/trace/events/dma-fence.h                | 128 ++++++
>>  include/trace/events/fence.h                    | 128 ------
>>  94 files changed, 1719 insertions(+), 1695 deletions(-)
>>  create mode 100644 drivers/dma-buf/dma-fence.c
>>  delete mode 100644 drivers/dma-buf/fence.c
>>  create mode 100644 include/linux/dma-fence.h
>>  delete mode 100644 include/linux/fence.h
>>  create mode 100644 include/trace/events/dma-fence.h
>>  delete mode 100644 include/trace/events/fence.h
>>
>> diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
>> index 98504ec99c7d..2f716c374616 100644
>> --- a/drivers/base/Kconfig
>> +++ b/drivers/base/Kconfig
>> @@ -238,11 +238,11 @@ config DMA_SHARED_BUFFER
>>    APIs extension; the file's descriptor can then be passed on to other
>>    driver.
>>
>> -config FENCE_TRACE
>> - bool "Enable verbose FENCE_TRACE messages"
>> +config DMA_FENCE_TRACE
>> + bool "Enable verbose DMA_FENCE_TRACE messages"
>>   depends on DMA_SHARED_BUFFER
>>   help
>> -  Enable the FENCE_TRACE printks. This will add extra
>> +  Enable the DMA_FENCE_TRACE printks. This will add extra
>>    spam to the console log, but will make it easier to diagnose
>>    lockup related problems for dma-buffers shared across multiple
>>    devices.
>> diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
>> index d1559cd04e3d..8a5a6dc917f6 100644
>> --- a/include/drm/drm_crtc.h
>> +++ b/include/drm/drm_crtc.h
>> @@ -43,7 +43,7 @@ struct drm_object_properties;
>>  struct drm_file;
>>  struct drm_clip_rect;
>>  struct device_node;
>> -struct fence;
>> +struct dma_fence;
>>
>>  struct drm_mode_object {
>>   uint32_t id;
>> @@ -1270,7 +1270,7 @@ struct drm_plane_state {
>>
>>   struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
>>   struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
>> - struct fence *fence;
>> + struct dma_fence *fence;
>>
>>   /* Signed dest location allows it to be partially off screen */
>>   int32_t crtc_x, crtc_y;
>> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
>> index 513f7f96b80a..325776a00907 100644
>> --- a/include/drm/ttm/ttm_bo_driver.h
>> +++ b/include/drm/ttm/ttm_bo_driver.h
>> @@ -1023,7 +1023,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
>>   */
>>
>>  extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
>> -     struct fence *fence,
>> +     struct dma_fence *fence,
>>       bool evict, bool no_wait_gpu,
>>       struct ttm_mem_reg *new_mem);
>>  /**
>> diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
>> index b620c317c772..47f35b8e6d09 100644
>> --- a/include/drm/ttm/ttm_execbuf_util.h
>> +++ b/include/drm/ttm/ttm_execbuf_util.h
>> @@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>>
>>  extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
>>   struct list_head *list,
>> - struct fence *fence);
>> + struct dma_fence *fence);
>>
>>  #endif
>> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
>> index 4551c6f2a6c4..a22550795ac7 100644
>> --- a/include/linux/dma-buf.h
>> +++ b/include/linux/dma-buf.h
>> @@ -30,7 +30,7 @@
>>  #include <linux/list.h>
>>  #include <linux/dma-mapping.h>
>>  #include <linux/fs.h>
>> -#include <linux/fence.h>
>> +#include <linux/dma-fence.h>
>>  #include <linux/wait.h>
>>
>>  struct device;
>> @@ -143,7 +143,7 @@ struct dma_buf {
>>   wait_queue_head_t poll;
>>
>>   struct dma_buf_poll_cb_t {
>> - struct fence_cb cb;
>> + struct dma_fence_cb cb;
>>   wait_queue_head_t *poll;
>>
>>   unsigned long active;
>> diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
>> new file mode 100644
>> index 000000000000..a543a87fd0ed
>> --- /dev/null
>> +++ b/include/linux/dma-fence.h
>> @@ -0,0 +1,392 @@
>> +/*
>> + * Fence mechanism for dma-buf to allow for asynchronous dma access
>> + *
>> + * Copyright (C) 2012 Canonical Ltd
>> + * Copyright (C) 2012 Texas Instruments
>> + *
>> + * Authors:
>> + * Rob Clark <robdclark at gmail.com>
>> + * Maarten Lankhorst <maarten.lankhorst at canonical.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify it
>> + * under the terms of the GNU General Public License version 2 as published by
>> + * the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful, but WITHOUT
>> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
>> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
>> + * more details.
>> + */
>> +
>> +#ifndef __LINUX_DMA_FENCE_H
>> +#define __LINUX_DMA_FENCE_H
>> +
>> +#include <linux/err.h>
>> +#include <linux/wait.h>
>> +#include <linux/list.h>
>> +#include <linux/bitops.h>
>> +#include <linux/kref.h>
>> +#include <linux/sched.h>
>> +#include <linux/printk.h>
>> +#include <linux/rcupdate.h>
>> +
>> +struct dma_fence;
>> +struct dma_fence_ops;
>> +struct dma_fence_cb;
>> +
>> +/**
>> + * struct dma_fence - software synchronization primitive
>> + * @refcount: refcount for this fence
>> + * @ops: dma_fence_ops associated with this fence
>> + * @rcu: used for releasing fence with kfree_rcu
>> + * @cb_list: list of all callbacks to call
>> + * @lock: spin_lock_irqsave used for locking
>> + * @context: execution context this fence belongs to, returned by
>> + *           dma_fence_context_alloc()
>> + * @seqno: the sequence number of this fence inside the execution context,
>> + * can be compared to decide which fence would be signaled later.
>> + * @flags: A mask of DMA_FENCE_FLAG_* defined below
>> + * @timestamp: Timestamp when the fence was signaled.
>> + * @status: Optional, only valid if < 0, must be set before calling
>> + * dma_fence_signal, indicates that the fence has completed with an error.
>> + * @child_list: list of children fences
>> + * @active_list: list of active fences
>> + *
>> + * the flags member must be manipulated and read using the appropriate
>> + * atomic ops (bit_*), so taking the spinlock will not be needed most
>> + * of the time.
>> + *
>> + * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
>> + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
>> + * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
>> + * implementer of the fence for its own purposes. Can be used in different
>> + * ways by different fence implementers, so do not rely on this.
>> + *
>> + * *) Since atomic bitops are used, this is not guaranteed to be the case.
>> + * Particularly, if the bit was set, but dma_fence_signal was called right
>> + * before this bit was set, it would have been able to set the
>> + * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
>> + * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
>> + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
>> + * after dma_fence_signal was called, any enable_signaling call will have either
>> + * been completed, or never called at all.
>> + */
>> +struct dma_fence {
>> + struct kref refcount;
>> + const struct dma_fence_ops *ops;
>> + struct rcu_head rcu;
>> + struct list_head cb_list;
>> + spinlock_t *lock;
>> + unsigned context, seqno;
>> + unsigned long flags;
>> + ktime_t timestamp;
>> + int status;
>> +};
>> +
>> +enum dma_fence_flag_bits {
>> + DMA_FENCE_FLAG_SIGNALED_BIT,
>> + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
>> + DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
>> +};
>> +
>> +typedef void (*dma_fence_func_t)(struct dma_fence *fence,
>> + struct dma_fence_cb *cb);
>> +
>> +/**
>> + * struct dma_fence_cb - callback for dma_fence_add_callback
>> + * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
>> + * @func: dma_fence_func_t to call
>> + *
>> + * This struct will be initialized by dma_fence_add_callback, additional
>> + * data can be passed along by embedding dma_fence_cb in another struct.
>> + */
>> +struct dma_fence_cb {
>> + struct list_head node;
>> + dma_fence_func_t func;
>> +};
>> +
>> +/**
>> + * struct dma_fence_ops - operations implemented for fence
>> + * @get_driver_name: returns the driver name.
>> + * @get_timeline_name: return the name of the context this fence belongs to.
>> + * @enable_signaling: enable software signaling of fence.
>> + * @signaled: [optional] peek whether the fence is signaled, can be null.
>> + * @wait: custom wait implementation, or dma_fence_default_wait.
>> + * @release: [optional] called on destruction of fence, can be null
>> + * @fill_driver_data: [optional] callback to fill in free-form debug info
>> + * Returns amount of bytes filled, or -errno.
>> + * @fence_value_str: [optional] fills in the value of the fence as a string
>> + * @timeline_value_str: [optional] fills in the current value of the timeline
>> + * as a string
>> + *
>> + * Notes on enable_signaling:
>> + * For fence implementations that have the capability for hw->hw
>> + * signaling, they can implement this op to enable the necessary
>> + * irqs, or insert commands into cmdstream, etc.  This is called
>> + * in the first wait() or add_callback() path to let the fence
>> + * implementation know that there is another driver waiting on
>> + * the signal (ie. hw->sw case).
>> + *
>> + * This function can be called from atomic context, but not
>> + * from irq context, so normal spinlocks can be used.
>> + *
>> + * A return value of false indicates the fence already passed,
>> + * or some failure occurred that made it impossible to enable
>> + * signaling. True indicates successful enabling.
>> + *
>> + * fence->status may be set in enable_signaling, but only when false is
>> + * returned.
>> + *
>> + * Calling dma_fence_signal before enable_signaling is called allows
>> + * for a tiny race window in which enable_signaling is called during,
>> + * before, or after dma_fence_signal. To fight this, it is recommended
>> + * that before enable_signaling returns true an extra reference is
>> + * taken on the fence, to be released when the fence is signaled.
>> + * This will mean dma_fence_signal will still be called twice, but
>> + * the second time will be a noop since it was already signaled.
>> + *
>> + * Notes on signaled:
>> + * May set fence->status if returning true.
>> + *
>> + * Notes on wait:
>> + * Must not be NULL, set to dma_fence_default_wait for default implementation.
>> + * the dma_fence_default_wait implementation should work for any fence, as long
>> + * as enable_signaling works correctly.
>> + *
>> + * Must return -ERESTARTSYS if the wait is intr = true and the wait was
>> + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
>> + * timed out. Can also return other error values on custom implementations,
>> + * which should be treated as if the fence is signaled. For example a hardware
>> + * lockup could be reported like that.
>> + *
>> + * Notes on release:
>> + * Can be NULL, this function allows additional commands to run on
>> + * destruction of the fence. Can be called from irq context.
>> + * If pointer is set to NULL, kfree will get called instead.
>> + */
>> +
>> +struct dma_fence_ops {
>> + const char * (*get_driver_name)(struct dma_fence *fence);
>> + const char * (*get_timeline_name)(struct dma_fence *fence);
>> + bool (*enable_signaling)(struct dma_fence *fence);
>> + bool (*signaled)(struct dma_fence *fence);
>> + signed long (*wait)(struct dma_fence *fence,
>> +    bool intr, signed long timeout);
>> + void (*release)(struct dma_fence *fence);
>> +
>> + int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
>> + void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
>> + void (*timeline_value_str)(struct dma_fence *fence,
>> +   char *str, int size);
>> +};
>> +
>> +void dma_fence_init(struct dma_fence *fence,
>> +    const struct dma_fence_ops *ops,
>> +    spinlock_t *lock, unsigned context, unsigned seqno);
>> +
>> +void dma_fence_release(struct kref *kref);
>> +void dma_fence_free(struct dma_fence *fence);
>> +
>> +/**
>> + * dma_fence_get - increases refcount of the fence
>> + * @fence: [in] fence to increase refcount of
>> + *
>> + * Returns the same fence, with refcount increased by 1.
>> + */
>> +static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
>> +{
>> + if (fence)
>> + kref_get(&fence->refcount);
>> + return fence;
>> +}
>> +
>> +/**
>> + * dma_fence_get_rcu - get a fence from a reservation_object_list with
>> + *                     rcu read lock
>> + * @fence: [in] fence to increase refcount of
>> + *
>> + * Function returns NULL if no refcount could be obtained, or the fence.
>> + */
>> +static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
>> +{
>> + if (kref_get_unless_zero(&fence->refcount))
>> + return fence;
>> + else
>> + return NULL;
>> +}
>> +
>> +/**
>> + * dma_fence_put - decreases refcount of the fence
>> + * @fence: [in] fence to reduce refcount of
>> + */
>> +static inline void dma_fence_put(struct dma_fence *fence)
>> +{
>> + if (fence)
>> + kref_put(&fence->refcount, dma_fence_release);
>> +}
>> +
>> +int dma_fence_signal(struct dma_fence *fence);
>> +int dma_fence_signal_locked(struct dma_fence *fence);
>> +signed long dma_fence_default_wait(struct dma_fence *fence,
>> +   bool intr, signed long timeout);
>> +int dma_fence_add_callback(struct dma_fence *fence,
>> +   struct dma_fence_cb *cb,
>> +   dma_fence_func_t func);
>> +bool dma_fence_remove_callback(struct dma_fence *fence,
>> +       struct dma_fence_cb *cb);
>> +void dma_fence_enable_sw_signaling(struct dma_fence *fence);
>> +
>> +/**
>> + * dma_fence_is_signaled_locked - Return an indication if the fence
>> + *                                is signaled yet.
>> + * @fence: [in] the fence to check
>> + *
>> + * Returns true if the fence was already signaled, false if not. Since this
>> + * function doesn't enable signaling, it is not guaranteed to ever return
>> + * true if dma_fence_add_callback, dma_fence_wait or
>> + * dma_fence_enable_sw_signaling haven't been called before.
>> + *
>> + * This function requires fence->lock to be held.
>> + */
>> +static inline bool
>> +dma_fence_is_signaled_locked(struct dma_fence *fence)
>> +{
>> + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
>> + return true;
>> +
>> + if (fence->ops->signaled && fence->ops->signaled(fence)) {
>> + dma_fence_signal_locked(fence);
>> + return true;
>> + }
>> +
>> + return false;
>> +}
>> +
>> +/**
>> + * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
>> + * @fence: [in] the fence to check
>> + *
>> + * Returns true if the fence was already signaled, false if not. Since this
>> + * function doesn't enable signaling, it is not guaranteed to ever return
>> + * true if dma_fence_add_callback, dma_fence_wait or
>> + * dma_fence_enable_sw_signaling haven't been called before.
>> + *
>> + * It's recommended for seqno fences to call dma_fence_signal when the
>> + * operation is complete, it makes it possible to prevent issues from
>> + * wraparound between time of issue and time of use by checking the return
>> + * value of this function before calling hardware-specific wait instructions.
>> + */
>> +static inline bool
>> +dma_fence_is_signaled(struct dma_fence *fence)
>> +{
>> + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
>> + return true;
>> +
>> + if (fence->ops->signaled && fence->ops->signaled(fence)) {
>> + dma_fence_signal(fence);
>> + return true;
>> + }
>> +
>> + return false;
>> +}
>> +
>> +/**
>> + * dma_fence_is_later - return if f1 is chronologically later than f2
>> + * @f1: [in] the first fence from the same context
>> + * @f2: [in] the second fence from the same context
>> + *
>> + * Returns true if f1 is chronologically later than f2. Both fences must be
>> + * from the same context, since a seqno is not re-used across contexts.
>> + */
>> +static inline bool dma_fence_is_later(struct dma_fence *f1,
>> +      struct dma_fence *f2)
>> +{
>> + if (WARN_ON(f1->context != f2->context))
>> + return false;
>> +
>> + return (int)(f1->seqno - f2->seqno) > 0;
>> +}
>> +
>> +/**
>> + * dma_fence_later - return the chronologically later fence
>> + * @f1: [in] the first fence from the same context
>> + * @f2: [in] the second fence from the same context
>> + *
>> + * Returns NULL if both fences are signaled, otherwise the fence that would be
>> + * signaled last. Both fences must be from the same context, since a seqno is
>> + * not re-used across contexts.
>> + */
>> +static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
>> +       struct dma_fence *f2)
>> +{
>> + if (WARN_ON(f1->context != f2->context))
>> + return NULL;
>> +
>> + /*
>> + * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
>> + * have been set if enable_signaling wasn't called, and enabling that
>> + * here is overkill.
>> + */
>> + if (dma_fence_is_later(f1, f2))
>> + return dma_fence_is_signaled(f1) ? NULL : f1;
>> + else
>> + return dma_fence_is_signaled(f2) ? NULL : f2;
>> +}
>> +
>> +signed long dma_fence_wait_timeout(struct dma_fence *,
>> +   bool intr, signed long timeout);
>> +signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
>> +       uint32_t count,
>> +       bool intr, signed long timeout);
>> +
>> +/**
>> + * dma_fence_wait - sleep until the fence gets signaled
>> + * @fence: [in] the fence to wait on
>> + * @intr: [in] if true, do an interruptible wait
>> + *
>> + * This function will return -ERESTARTSYS if interrupted by a signal,
>> + * or 0 if the fence was signaled. Other error values may be
>> + * returned on custom implementations.
>> + *
>> + * Performs a synchronous wait on this fence. It is assumed the caller
>> + * directly or indirectly holds a reference to the fence, otherwise the
>> + * fence might be freed before return, resulting in undefined behavior.
>> + */
>> +static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
>> +{
>> + signed long ret;
>> +
>> + /* Since dma_fence_wait_timeout cannot timeout with
>> + * MAX_SCHEDULE_TIMEOUT, only valid return values are
>> + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
>> + */
>> + ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
>> +
>> + return ret < 0 ? ret : 0;
>> +}
>> +
>> +unsigned dma_fence_context_alloc(unsigned num);
>> +
>> +#define DMA_FENCE_TRACE(f, fmt, args...) \
>> + do { \
>> + struct dma_fence *__ff = (f); \
>> + if (config_enabled(CONFIG_DMA_FENCE_TRACE)) \
>> + pr_info("f %u#%u: " fmt, \
>> + __ff->context, __ff->seqno, ##args); \
>> + } while (0)
>> +
>> +#define DMA_FENCE_WARN(f, fmt, args...) \
>> + do { \
>> + struct dma_fence *__ff = (f); \
>> + pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
>> + ##args); \
>> + } while (0)
>> +
>> +#define DMA_FENCE_ERR(f, fmt, args...) \
>> + do { \
>> + struct dma_fence *__ff = (f); \
>> + pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
>> + ##args); \
>> + } while (0)
>> +
>> +#endif /* __LINUX_DMA_FENCE_H */
>> diff --git a/include/linux/reservation.h b/include/linux/reservation.h
>> index b0f305e77b7f..2e313cca08f0 100644
>> --- a/include/linux/reservation.h
>> +++ b/include/linux/reservation.h
>> @@ -40,7 +40,7 @@
>>  #define _LINUX_RESERVATION_H
>>
>>  #include <linux/ww_mutex.h>
>> -#include <linux/fence.h>
>> +#include <linux/dma-fence.h>
>>  #include <linux/slab.h>
>>  #include <linux/seqlock.h>
>>  #include <linux/rcupdate.h>
>> @@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[];
>>  struct reservation_object_list {
>>   struct rcu_head rcu;
>>   u32 shared_count, shared_max;
>> - struct fence __rcu *shared[];
>> + struct dma_fence __rcu *shared[];
>>  };
>>
>>  /**
>> @@ -74,7 +74,7 @@ struct reservation_object {
>>   struct ww_mutex lock;
>>   seqcount_t seq;
>>
>> - struct fence __rcu *fence_excl;
>> + struct dma_fence __rcu *fence_excl;
>>   struct reservation_object_list __rcu *fence;
>>   struct reservation_object_list *staged;
>>  };
>> @@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj)
>>  {
>>   int i;
>>   struct reservation_object_list *fobj;
>> - struct fence *excl;
>> + struct dma_fence *excl;
>>
>>   /*
>>   * This object should be dead and all references must have
>> @@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj)
>>   */
>>   excl = rcu_dereference_protected(obj->fence_excl, 1);
>>   if (excl)
>> - fence_put(excl);
>> + dma_fence_put(excl);
>>
>>   fobj = rcu_dereference_protected(obj->fence, 1);
>>   if (fobj) {
>>   for (i = 0; i < fobj->shared_count; ++i)
>> - fence_put(rcu_dereference_protected(fobj->shared[i], 1));
>> + dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
>>
>>   kfree(fobj);
>>   }
>> @@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj)
>>   * RETURNS
>>   * The exclusive fence or NULL
>>   */
>> -static inline struct fence *
>> +static inline struct dma_fence *
>>  reservation_object_get_excl(struct reservation_object *obj)
>>  {
>>   return rcu_dereference_protected(obj->fence_excl,
>> @@ -173,10 +173,10 @@ reservation_object_get_excl(struct reservation_object *obj)
>>   * RETURNS
>>   * The exclusive fence or NULL if none
>>   */
>> -static inline struct fence *
>> +static inline struct dma_fence *
>>  reservation_object_get_excl_rcu(struct reservation_object *obj)
>>  {
>> - struct fence *fence;
>> + struct dma_fence *fence;
>>   unsigned seq;
>>  retry:
>>   seq = read_seqcount_begin(&obj->seq);
>> @@ -186,22 +186,22 @@ retry:
>>   rcu_read_unlock();
>>   goto retry;
>>   }
>> - fence = fence_get(fence);
>> + fence = dma_fence_get(fence);
>>   rcu_read_unlock();
>>   return fence;
>>  }
>>
>>  int reservation_object_reserve_shared(struct reservation_object *obj);
>>  void reservation_object_add_shared_fence(struct reservation_object *obj,
>> - struct fence *fence);
>> + struct dma_fence *fence);
>>
>>  void reservation_object_add_excl_fence(struct reservation_object *obj,
>> -       struct fence *fence);
>> +       struct dma_fence *fence);
>>
>>  int reservation_object_get_fences_rcu(struct reservation_object *obj,
>> -      struct fence **pfence_excl,
>> +      struct dma_fence **pfence_excl,
>>        unsigned *pshared_count,
>> -      struct fence ***pshared);
>> +      struct dma_fence ***pshared);
>>
>>  long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
>>   bool wait_all, bool intr,
>> diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
>> index a1ba6a5ccdd6..a1f2dbeb1ba6 100644
>> --- a/include/linux/seqno-fence.h
>> +++ b/include/linux/seqno-fence.h
>> @@ -20,7 +20,7 @@
>>  #ifndef __LINUX_SEQNO_FENCE_H
>>  #define __LINUX_SEQNO_FENCE_H
>>
>> -#include <linux/fence.h>
>> +#include <linux/dma-fence.h>
>>  #include <linux/dma-buf.h>
>>
>>  enum seqno_fence_condition {
>> @@ -29,15 +29,15 @@ enum seqno_fence_condition {
>>  };
>>
>>  struct seqno_fence {
>> - struct fence base;
>> + struct dma_fence base;
>>
>> - const struct fence_ops *ops;
>> + const struct dma_fence_ops *ops;
>>   struct dma_buf *sync_buf;
>>   uint32_t seqno_ofs;
>>   enum seqno_fence_condition condition;
>>  };
>>
>> -extern const struct fence_ops seqno_fence_ops;
>> +extern const struct dma_fence_ops seqno_fence_ops;
>>
>>  /**
>>   * to_seqno_fence - cast a fence to a seqno_fence
>> @@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops;
>>   * or the seqno_fence otherwise.
>>   */
>>  static inline struct seqno_fence *
>> -to_seqno_fence(struct fence *fence)
>> +to_seqno_fence(struct dma_fence *fence)
>>  {
>>   if (fence->ops != &seqno_fence_ops)
>>   return NULL;
>> @@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
>>   struct dma_buf *sync_buf,  uint32_t context,
>>   uint32_t seqno_ofs, uint32_t seqno,
>>   enum seqno_fence_condition cond,
>> - const struct fence_ops *ops)
>> + const struct dma_fence_ops *ops)
>>  {
>>   BUG_ON(!fence || !sync_buf || !ops);
>>   BUG_ON(!ops->wait || !ops->enable_signaling ||
>>         !ops->get_driver_name || !ops->get_timeline_name);
>>
>>   /*
>> - * ops is used in fence_init for get_driver_name, so needs to be
>> + * ops is used in dma_fence_init for get_driver_name, so needs to be
>>   * initialized first
>>   */
>>   fence->ops = ops;
>> - fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
>> + dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
>>   get_dma_buf(sync_buf);
>>   fence->sync_buf = sync_buf;
>>   fence->seqno_ofs = seqno_ofs;
>> diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
>> index c6ffe8b0725c..6aae242b26de 100644
>> --- a/include/linux/sync_file.h
>> +++ b/include/linux/sync_file.h
>> @@ -18,11 +18,11 @@
>>  #include <linux/ktime.h>
>>  #include <linux/list.h>
>>  #include <linux/spinlock.h>
>> -#include <linux/fence.h>
>> +#include <linux/dma-fence.h>
>>
>>  struct sync_file_cb {
>> - struct fence_cb cb;
>> - struct fence *fence;
>> + struct dma_fence_cb cb;
>> + struct dma_fence *fence;
>>   struct sync_file *sync_file;
>>  };
>>
>> @@ -52,6 +52,6 @@ struct sync_file {
>>   struct sync_file_cb cbs[];
>>  };
>>
>> -struct sync_file *sync_file_create(struct fence *fence);
>> +struct sync_file *sync_file_create(struct dma_fence *fence);
>>
>>  #endif /* _LINUX_SYNC_H */
>> diff --git a/include/trace/events/dma-fence.h b/include/trace/events/dma-fence.h
>> new file mode 100644
>> index 000000000000..9d6e9434e53c
>> --- /dev/null
>> +++ b/include/trace/events/dma-fence.h
>> @@ -0,0 +1,128 @@
>> +#undef TRACE_SYSTEM
>> +#define TRACE_SYSTEM dma-fence
>> +
>> +#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
>> +#define _TRACE_DMA_FENCE_H
>> +
>> +#include <linux/tracepoint.h>
>> +
>> +struct dma_fence;
>> +
>> +TRACE_EVENT(dma_fence_annotate_wait_on,
>> +
>> + /* fence: the fence waiting on f1, f1: the fence to be waited on. */
>> + TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
>> +
>> + TP_ARGS(fence, f1),
>> +
>> + TP_STRUCT__entry(
>> + __string(driver, fence->ops->get_driver_name(fence))
>> + __string(timeline, fence->ops->get_timeline_name(fence))
>> + __field(unsigned int, context)
>> + __field(unsigned int, seqno)
>> +
>> + __string(waiting_driver, f1->ops->get_driver_name(f1))
>> + __string(waiting_timeline, f1->ops->get_timeline_name(f1))
>> + __field(unsigned int, waiting_context)
>> + __field(unsigned int, waiting_seqno)
>> + ),
>> +
>> + TP_fast_assign(
>> + __assign_str(driver, fence->ops->get_driver_name(fence))
>> + __assign_str(timeline, fence->ops->get_timeline_name(fence))
>> + __entry->context = fence->context;
>> + __entry->seqno = fence->seqno;
>> +
>> + __assign_str(waiting_driver, f1->ops->get_driver_name(f1))
>> + __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
>> + __entry->waiting_context = f1->context;
>> + __entry->waiting_seqno = f1->seqno;
>> +
>> + ),
>> +
>> + TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
>> +  "waits on driver=%s timeline=%s context=%u seqno=%u",
>> +  __get_str(driver), __get_str(timeline), __entry->context,
>> +  __entry->seqno,
>> +  __get_str(waiting_driver), __get_str(waiting_timeline),
>> +  __entry->waiting_context, __entry->waiting_seqno)
>> +);
>> +
>> +DECLARE_EVENT_CLASS(dma_fence,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence),
>> +
>> + TP_STRUCT__entry(
>> + __string(driver, fence->ops->get_driver_name(fence))
>> + __string(timeline, fence->ops->get_timeline_name(fence))
>> + __field(unsigned int, context)
>> + __field(unsigned int, seqno)
>> + ),
>> +
>> + TP_fast_assign(
>> + __assign_str(driver, fence->ops->get_driver_name(fence))
>> + __assign_str(timeline, fence->ops->get_timeline_name(fence))
>> + __entry->context = fence->context;
>> + __entry->seqno = fence->seqno;
>> + ),
>> +
>> + TP_printk("driver=%s timeline=%s context=%u seqno=%u",
>> +  __get_str(driver), __get_str(timeline), __entry->context,
>> +  __entry->seqno)
>> +);
>> +
>> +DEFINE_EVENT(dma_fence, dma_fence_emit,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence)
>> +);
>> +
>> +DEFINE_EVENT(dma_fence, dma_fence_init,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence)
>> +);
>> +
>> +DEFINE_EVENT(dma_fence, dma_fence_destroy,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence)
>> +);
>> +
>> +DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence)
>> +);
>> +
>> +DEFINE_EVENT(dma_fence, dma_fence_signaled,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence)
>> +);
>> +
>> +DEFINE_EVENT(dma_fence, dma_fence_wait_start,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence)
>> +);
>> +
>> +DEFINE_EVENT(dma_fence, dma_fence_wait_end,
>> +
>> + TP_PROTO(struct dma_fence *fence),
>> +
>> + TP_ARGS(fence)
>> +);
>> +
>> +#endif /*  _TRACE_DMA_FENCE_H */
>> +
>> +/* This part must be outside protection */
>> +#include <trace/define_trace.h>
>> --
>> 2.8.1
>>
>> _______________________________________________
>> dri-devel mailing list
>> dri-devel at lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/dri-devel
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
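
(As a quick illustration of how the renamed interface reads on the driver
side, here is a minimal sketch built only on the declarations in the header
quoted above. Everything outside the dma_fence_* calls (my_work, my_fence_cb,
do_my_next_step, and the two helpers) is hypothetical, and the error handling
assumes the usual convention that a non-zero return from
dma_fence_add_callback means no callback was installed, e.g. because the
fence had already signaled.)

#include <linux/kernel.h>
#include <linux/dma-fence.h>

struct my_work {
	struct dma_fence_cb cb;
	/* ... driver state ... */
};

/* Hypothetical driver hook that runs once the dependency has signaled. */
static void do_my_next_step(struct my_work *work)
{
	/* ... kick off the next stage of the driver's work ... */
}

/* Callback invoked when the fence signals; drops the reference taken in
 * my_queue_after_fence() below. */
static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_work *work = container_of(cb, struct my_work, cb);

	dma_fence_put(fence);
	do_my_next_step(work);
}

/* Defer @work until @fence signals. A non-zero return means no callback was
 * installed (e.g. the fence had already signaled), so the caller proceeds
 * immediately. */
static int my_queue_after_fence(struct my_work *work, struct dma_fence *fence)
{
	int ret;

	dma_fence_get(fence);
	ret = dma_fence_add_callback(fence, &work->cb, my_fence_cb);
	if (ret)
		dma_fence_put(fence);
	return ret;
}

/* Synchronous variant: per the dma_fence_wait() kerneldoc above, the caller
 * must hold a reference across the wait. */
static long my_wait_for_fence(struct dma_fence *fence)
{
	long ret;

	dma_fence_get(fence);
	ret = dma_fence_wait(fence, true);	/* interruptible */
	dma_fence_put(fence);

	return ret;	/* 0 on signal, -ERESTARTSYS if interrupted */
}

Since the patch is a mechanical rename, these map one-to-one onto the old
fence_get()/fence_put()/fence_add_callback()/fence_wait() patterns.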

