[PATCH 6/7] dma_fence_publish
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Fri Jun 12 13:50:22 UTC 2020
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
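Add a DMA_FENCE_FLAG_PUBLISHED bit (only defined under CONFIG_LOCKDEP) which is set once a fence becomes visible outside its creator: when it is added to a dma_resv object, wrapped in a sync_file, or attached to a drm_syncobj that is exported as a file descriptor. Pass the fence into the signalling annotations so that dma_fence_begin_signalling(), dma_fence_end_signalling() and __dma_fence_might_wait() only enter the cross-driver lockdep map for fences which have actually been published, and update the callers accordingly. The boot-time priming in dma_resv_lockdep() is compiled out (#if 0) for now, since there is no published fence to pass there. An illustrative usage sketch (not part of the patch) follows below the '---' marker.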
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
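A minimal sketch of the intended flow, for reviewers only and not part of the patch; all my_* names are hypothetical driver code invented for the example:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

struct my_job {
	struct dma_fence fence;
	spinlock_t lock;
	struct dma_resv *resv;	/* reservation object of the buffer the job writes */
	u64 context;
	u64 seqno;
};

extern const struct dma_fence_ops my_fence_ops;	/* hypothetical fence ops */

/* Called with job->resv already locked by the caller. */
static void my_submit(struct my_job *job)
{
	dma_fence_init(&job->fence, &my_fence_ops, &job->lock,
		       job->context, job->seqno);

	/*
	 * Publication point: dma_resv_add_excl_fence() now calls
	 * dma_fence_publish(), which sets DMA_FENCE_FLAG_PUBLISHED
	 * (the bit only exists under CONFIG_LOCKDEP).
	 */
	dma_resv_add_excl_fence(job->resv, &job->fence);
}

static void my_complete(struct my_job *job)
{
	bool cookie;

	/*
	 * The annotations now take the fence; for a fence which was
	 * never published they return early, so purely driver-internal
	 * fences stay out of the global dma_fence_lockdep_map.
	 */
	cookie = dma_fence_begin_signalling(&job->fence);
	dma_fence_signal(&job->fence);
	dma_fence_end_signalling(&job->fence, cookie);
}

In other words, lockdep's signalling critical section tracking only kicks in once a fence can be observed by other users, which is what the test_bit(DMA_FENCE_FLAG_PUBLISHED, ...) early returns in dma-fence.c implement.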
drivers/dma-buf/dma-fence.c | 19 +++++++++++++------
drivers/dma-buf/dma-resv.c | 11 ++++++++---
drivers/dma-buf/sync_file.c | 1 +
drivers/gpu/drm/drm_syncobj.c | 1 +
drivers/gpu/drm/i915/i915_sw_fence.c | 2 ++
drivers/gpu/drm/i915/i915_sw_fence_work.c | 4 ++--
include/linux/dma-fence.h | 21 +++++++++++++++------
7 files changed, 42 insertions(+), 17 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 754e6fb84fb7..bb50afb06f8b 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -259,8 +259,11 @@ struct lockdep_map dma_fence_lockdep_map = {
* Opaque cookie needed by the implementation, which needs to be passed to
* dma_fence_end_signalling().
*/
-bool dma_fence_begin_signalling(void)
+bool dma_fence_begin_signalling(struct dma_fence *fence)
{
+ if (!test_bit(DMA_FENCE_FLAG_PUBLISHED, &fence->flags))
+ return true;
+
/* explicitly nesting ... */
if (lock_is_held_type(&dma_fence_lockdep_map, 1))
return true;
@@ -281,7 +284,7 @@ EXPORT_SYMBOL(dma_fence_begin_signalling);
*
* Closes a critical section annotation opened by dma_fence_begin_signalling().
*/
-void dma_fence_end_signalling(bool cookie)
+void dma_fence_end_signalling(struct dma_fence *fence, bool cookie)
{
if (cookie)
return;
@@ -290,10 +293,13 @@ void dma_fence_end_signalling(bool cookie)
}
EXPORT_SYMBOL(dma_fence_end_signalling);
-void __dma_fence_might_wait(void)
+void __dma_fence_might_wait(struct dma_fence *fence)
{
bool tmp;
+ if (!test_bit(DMA_FENCE_FLAG_PUBLISHED, &fence->flags))
+ return;
+
tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
if (tmp)
lock_release(&dma_fence_lockdep_map, _THIS_IP_);
@@ -302,6 +308,7 @@ void __dma_fence_might_wait(void)
if (tmp)
lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
}
+EXPORT_SYMBOL(__dma_fence_might_wait);
#endif
@@ -370,13 +377,13 @@ int dma_fence_signal(struct dma_fence *fence)
if (!fence)
return -EINVAL;
- tmp = dma_fence_begin_signalling();
+ tmp = dma_fence_begin_signalling(fence);
spin_lock_irqsave(fence->lock, flags);
ret = dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
- dma_fence_end_signalling(tmp);
+ dma_fence_end_signalling(fence, tmp);
return ret;
}
@@ -410,7 +417,7 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
might_sleep();
- __dma_fence_might_wait();
+ __dma_fence_might_wait(fence);
trace_dma_fence_wait_start(fence);
if (fence->ops->wait)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 2f51e40b3528..209385774930 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -116,12 +116,14 @@ static int __init dma_resv_lockdep(void)
if (ret == -EDEADLK)
dma_resv_lock_slow(&obj, &ctx);
fs_reclaim_acquire(GFP_KERNEL);
+#if 0
#ifdef CONFIG_MMU_NOTIFIER
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
- __dma_fence_might_wait();
+ __dma_fence_might_wait(NULL);
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
- __dma_fence_might_wait();
+ __dma_fence_might_wait(NULL);
+#endif
#endif
fs_reclaim_release(GFP_KERNEL);
ww_mutex_unlock(&obj.lock);
@@ -291,6 +293,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
count++;
replace:
+ dma_fence_publish(fence);
RCU_INIT_POINTER(fobj->shared[i], fence);
/* pointer update must be visible before we extend the shared_count */
smp_store_mb(fobj->shared_count, count);
@@ -320,8 +323,10 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
if (old)
i = old->shared_count;
- if (fence)
+ if (fence) {
dma_fence_get(fence);
+ dma_fence_publish(fence);
+ }
preempt_disable();
write_seqcount_begin(&obj->seq);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 5a5a1da01a00..c5dfad960667 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -70,6 +70,7 @@ struct sync_file *sync_file_create(struct dma_fence *fence)
return NULL;
sync_file->fence = dma_fence_get(fence);
+ dma_fence_publish(fence);
return sync_file;
}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 42d46414f767..9f8581201f6c 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -594,6 +594,7 @@ int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
}
drm_syncobj_get(syncobj);
+ dma_fence_publish(drm_syncobj_fence_get(syncobj));
fd_install(fd, file);
*p_fd = fd;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 295b9829e2da..b956efdfc0ab 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -469,6 +469,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
+ if (gfpflags_allow_blocking(gfp))
+ __dma_fence_might_wait(dma);
if (dma_fence_is_signaled(dma)) {
i915_sw_fence_set_error_once(fence, dma->error);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 5b74acadaef5..15dd85ecd2d1 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -19,13 +19,13 @@ static void fence_work(struct work_struct *work)
int err;
bool fence_cookie;
- fence_cookie = dma_fence_begin_signalling();
+ fence_cookie = dma_fence_begin_signalling(&f->dma);
err = f->ops->work(f);
if (err)
dma_fence_set_error(&f->dma, err);
fence_complete(f);
- dma_fence_end_signalling(fence_cookie);
+ dma_fence_end_signalling(&f->dma, fence_cookie);
dma_fence_put(&f->dma);
}
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 09e23adb351d..f38680105f36 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -99,6 +99,9 @@ enum dma_fence_flag_bits {
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+#ifdef CONFIG_LOCKDEP
+ DMA_FENCE_FLAG_PUBLISHED,
+#endif
DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
@@ -358,16 +361,22 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
}
#ifdef CONFIG_LOCKDEP
-bool dma_fence_begin_signalling(void);
-void dma_fence_end_signalling(bool cookie);
-void __dma_fence_might_wait(void);
+bool dma_fence_begin_signalling(struct dma_fence *fence);
+void dma_fence_end_signalling(struct dma_fence *fence, bool cookie);
+void __dma_fence_might_wait(struct dma_fence *fence);
+static inline void dma_fence_publish(struct dma_fence *fence)
+{
+ __set_bit(DMA_FENCE_FLAG_PUBLISHED, &fence->flags);
+}
#else
-static inline bool dma_fence_begin_signalling(void)
+static inline bool dma_fence_begin_signalling(struct dma_fence *fence)
{
return true;
}
-static inline void dma_fence_end_signalling(bool cookie) {}
-static inline void __dma_fence_might_wait(void) {}
+static inline void dma_fence_end_signalling(struct dma_fence *fence,
+ bool cookie) {}
+static inline void __dma_fence_might_wait(struct dma_fence *fence) {}
+static inline void dma_fence_publish(struct dma_fence *fence) {}
#endif
int dma_fence_signal(struct dma_fence *fence);
--
2.20.1