[PATCH 1/2] dma-buf: nuke DMA_FENCE_TRACE macros v2

Christian König ckoenig.leichtzumerken at gmail.com
Mon Aug 30 06:28:49 UTC 2021


Am 27.08.21 um 22:23 schrieb Daniel Vetter:
> On Fri, Aug 27, 2021 at 11:07:58AM +0200, Christian König wrote:
>> Am 26.08.21 um 10:55 schrieb Daniel Vetter:
>>> On Tue, Aug 24, 2021 at 10:12:24AM +0200, Christian König wrote:
>>>> Just a gentle ping. Daniel any more comments on this?
>>> Still haven't seen a patch set to nuke the sw_sync igt tests. Otherwise
>>> this is just going to cause fails and reboots in our ci (we reboot on
>>> taints).
>> *sigh* can I at least print a warning without breaking the igt tests?
> CI watches dmesg too ... it just doesn't force a reboot (which hurts run
> rate really badly).
>
>>>> I'm not sure if the second patch will cause trouble with any unit test, but
>>>> I'm willing to try it. We can always trivially revert it.
>>> See above, remove the igts and we should be fine I think. I don't think
>>> there's any selftests or kselftests, but checking that should be a quick
>>> grep at most.
>> Yeah, we don't have any selftests as far as I can see, but this stuff is so
>> interwoven with igt that it will be hard to remove, I think.
>>
>> A good bunch of the igt code seems to have been moved to using VGEM instead,
>> but as far as I can see there is still plenty left relying on this.
>>
>> Alternatively could we make the config option depend on CONFIG_DEBUG?
> Hm, I thought it was just down to the sw_sync igt testcase, and everything else
> has moved to vgem. Do we have more, or has more landed since I looked a
> while ago?

The code under lib/sw_sync.c uses this, and based on that 
lib/igt_dummyload.c defines an IGT_CORK_FENCE, which is then used by at 
least:

tests/i915/gem_exec_fence.c
tests/i915/gem_eio.c
tests/i915/gem_exec_schedule.c
tests/i915/gem_exec_balancer.c
tests/i915/gem_ctx_shared.c
tests/kms_busy.c

After that I've stopped looking any deeper into it.
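
For reference, the user-space interface all of those helpers ultimately sit 
on is the sw_sync debugfs node. Below is a rough sketch of the "cork" 
pattern, using the ioctl layout from drivers/dma-buf/sw_sync.c; it assumes 
CONFIG_SW_SYNC is enabled and debugfs is mounted at /sys/kernel/debug, and 
it is only an illustration, not actual igt code:

/*
 * Minimal sketch of the sw_sync "cork" pattern igt builds on.
 * Ioctl layout taken from drivers/dma-buf/sw_sync.c; assumes
 * CONFIG_SW_SYNC and debugfs mounted at /sys/kernel/debug.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>
#include <linux/types.h>

struct sw_sync_create_fence_data {
	__u32 value;
	char name[32];
	__s32 fence;	/* fd of the new fence, filled in by the kernel */
};

#define SW_SYNC_IOC_MAGIC		'W'
#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0, \
					      struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)

int main(void)
{
	struct sw_sync_create_fence_data data = { .value = 1 };
	__u32 inc = 1;
	int timeline, fence;

	/* One open() == one sw_sync timeline. */
	timeline = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	if (timeline < 0) {
		perror("sw_sync");
		return 1;
	}

	/* Create a fence that signals once the timeline reaches 1. */
	strcpy(data.name, "cork");
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data)) {
		perror("create fence");
		return 1;
	}
	fence = data.fence;

	/*
	 * igt now plugs this fence fd into a submission as an input fence
	 * (the "cork"), so the work stays blocked until user space decides
	 * to release it.
	 */

	/* Advance the timeline by 1, signalling the fence. */
	ioctl(timeline, SW_SYNC_IOC_INC, &inc);

	close(fence);
	close(timeline);
	return 0;
}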

Christian.

> -Daniel
>
>> Christian.
>>
>>> -Daniel
>>>
>>>> Thanks,
>>>> Christian.
>>>>
>>>> Am 18.08.21 um 12:54 schrieb Christian König:
>>>>> Only the DRM GPU scheduler, radeon and amdgpu were using them, and they depend
>>>>> on a non-existent config option to actually emit some code.
>>>>>
>>>>> v2: keep the signal path as is for now
>>>>>
>>>>> Signed-off-by: Christian König <christian.koenig at amd.com>
>>>>> ---
>>>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10 +---------
>>>>>     drivers/gpu/drm/radeon/radeon_fence.c     | 24 ++++-------------------
>>>>>     drivers/gpu/drm/scheduler/sched_fence.c   | 18 ++---------------
>>>>>     include/linux/dma-fence.h                 | 22 ---------------------
>>>>>     4 files changed, 7 insertions(+), 67 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>>>> index 0b1c48590c43..c65994e382bd 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>>>> @@ -246,7 +246,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
>>>>>     	struct amdgpu_fence_driver *drv = &ring->fence_drv;
>>>>>     	struct amdgpu_device *adev = ring->adev;
>>>>>     	uint32_t seq, last_seq;
>>>>> -	int r;
>>>>>     	do {
>>>>>     		last_seq = atomic_read(&ring->fence_drv.last_seq);
>>>>> @@ -278,12 +277,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
>>>>>     		if (!fence)
>>>>>     			continue;
>>>>> -		r = dma_fence_signal(fence);
>>>>> -		if (!r)
>>>>> -			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
>>>>> -		else
>>>>> -			BUG();
>>>>> -
>>>>> +		dma_fence_signal(fence);
>>>>>     		dma_fence_put(fence);
>>>>>     		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
>>>>>     		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>>>>> @@ -639,8 +633,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
>>>>>     	if (!timer_pending(&ring->fence_drv.fallback_timer))
>>>>>     		amdgpu_fence_schedule_fallback(ring);
>>>>> -	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
>>>>> -
>>>>>     	return true;
>>>>>     }
>>>>> diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
>>>>> index 18f2c2e0dfb3..3f351d222cbb 100644
>>>>> --- a/drivers/gpu/drm/radeon/radeon_fence.c
>>>>> +++ b/drivers/gpu/drm/radeon/radeon_fence.c
>>>>> @@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
>>>>>     	 */
>>>>>     	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
>>>>>     	if (seq >= fence->seq) {
>>>>> -		int ret = dma_fence_signal_locked(&fence->base);
>>>>> -
>>>>> -		if (!ret)
>>>>> -			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
>>>>> -		else
>>>>> -			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
>>>>> -
>>>>> +		dma_fence_signal_locked(&fence->base);
>>>>>     		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
>>>>>     		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
>>>>>     		dma_fence_put(&fence->base);
>>>>> -	} else
>>>>> -		DMA_FENCE_TRACE(&fence->base, "pending\n");
>>>>> +	}
>>>>>     	return 0;
>>>>>     }
>>>>> @@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f)
>>>>>     	fence->fence_wake.func = radeon_fence_check_signaled;
>>>>>     	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
>>>>>     	dma_fence_get(f);
>>>>> -
>>>>> -	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
>>>>>     	return true;
>>>>>     }
>>>>> @@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
>>>>>     		return true;
>>>>>     	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
>>>>> -		int ret;
>>>>> -
>>>>> -		ret = dma_fence_signal(&fence->base);
>>>>> -		if (!ret)
>>>>> -			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
>>>>> +		dma_fence_signal(&fence->base);
>>>>>     		return true;
>>>>>     	}
>>>>>     	return false;
>>>>> @@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
>>>>>     {
>>>>>     	uint64_t seq[RADEON_NUM_RINGS] = {};
>>>>>     	long r;
>>>>> -	int r_sig;
>>>>>     	/*
>>>>>     	 * This function should not be called on !radeon fences.
>>>>> @@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
>>>>>     		return r;
>>>>>     	}
>>>>> -	r_sig = dma_fence_signal(&fence->base);
>>>>> -	if (!r_sig)
>>>>> -		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
>>>>> +	dma_fence_signal(&fence->base);
>>>>>     	return r;
>>>>>     }
>>>>> diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
>>>>> index 69de2c76731f..3736746c47bd 100644
>>>>> --- a/drivers/gpu/drm/scheduler/sched_fence.c
>>>>> +++ b/drivers/gpu/drm/scheduler/sched_fence.c
>>>>> @@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void)
>>>>>     void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
>>>>>     {
>>>>> -	int ret = dma_fence_signal(&fence->scheduled);
>>>>> -
>>>>> -	if (!ret)
>>>>> -		DMA_FENCE_TRACE(&fence->scheduled,
>>>>> -				"signaled from irq context\n");
>>>>> -	else
>>>>> -		DMA_FENCE_TRACE(&fence->scheduled,
>>>>> -				"was already signaled\n");
>>>>> +	dma_fence_signal(&fence->scheduled);
>>>>>     }
>>>>>     void drm_sched_fence_finished(struct drm_sched_fence *fence)
>>>>>     {
>>>>> -	int ret = dma_fence_signal(&fence->finished);
>>>>> -
>>>>> -	if (!ret)
>>>>> -		DMA_FENCE_TRACE(&fence->finished,
>>>>> -				"signaled from irq context\n");
>>>>> -	else
>>>>> -		DMA_FENCE_TRACE(&fence->finished,
>>>>> -				"was already signaled\n");
>>>>> +	dma_fence_signal(&fence->finished);
>>>>>     }
>>>>>     static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
>>>>> diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
>>>>> index 6ffb4b2c6371..4cc119ab272f 100644
>>>>> --- a/include/linux/dma-fence.h
>>>>> +++ b/include/linux/dma-fence.h
>>>>> @@ -590,26 +590,4 @@ struct dma_fence *dma_fence_get_stub(void);
>>>>>     struct dma_fence *dma_fence_allocate_private_stub(void);
>>>>>     u64 dma_fence_context_alloc(unsigned num);
>>>>> -#define DMA_FENCE_TRACE(f, fmt, args...) \
>>>>> -	do {								\
>>>>> -		struct dma_fence *__ff = (f);				\
>>>>> -		if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))			\
>>>>> -			pr_info("f %llu#%llu: " fmt,			\
>>>>> -				__ff->context, __ff->seqno, ##args);	\
>>>>> -	} while (0)
>>>>> -
>>>>> -#define DMA_FENCE_WARN(f, fmt, args...) \
>>>>> -	do {								\
>>>>> -		struct dma_fence *__ff = (f);				\
>>>>> -		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
>>>>> -			 ##args);					\
>>>>> -	} while (0)
>>>>> -
>>>>> -#define DMA_FENCE_ERR(f, fmt, args...) \
>>>>> -	do {								\
>>>>> -		struct dma_fence *__ff = (f);				\
>>>>> -		pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno,	\
>>>>> -			##args);					\
>>>>> -	} while (0)
>>>>> -
>>>>>     #endif /* __LINUX_DMA_FENCE_H */
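
On the question of what replaces the removed macros: the common signaling 
path already fires the existing dma_fence trace events (dma_fence_signaled 
and friends), so the per-driver DMA_FENCE_TRACE calls were largely 
redundant anyway. A minimal userspace sketch for getting at that 
information, assuming tracefs is mounted at /sys/kernel/tracing and 
CONFIG_TRACING is enabled; again just an illustration, not part of the 
patch:

/*
 * Enable the dma_fence_signaled trace event and stream the resulting
 * records (context and seqno are part of the event). Assumes tracefs
 * is mounted at /sys/kernel/tracing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Turn on the dma_fence_signaled event. */
	fd = open("/sys/kernel/tracing/events/dma_fence/dma_fence_signaled/enable",
		  O_WRONLY);
	if (fd < 0) {
		perror("enable dma_fence_signaled");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write enable");
	close(fd);

	/* Dump the trace records as they come in. */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}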


