Am 21.10.21 um 12:35 schrieb Maarten Lankhorst:
From: Christian König <christian.koenig@amd.com>
Simplifying the code a bit.
Signed-off-by: Christian König <christian.koenig@amd.com>
[mlankhorst: Handle timeout = 0 correctly, use new i915_request_wait_timeout.]
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
LGTM, do you want to push it or should I pick it up into drm-misc-next?
Christian.
drivers/gpu/drm/i915/gem/i915_gem_wait.c | 65 ++++++++----------------
1 file changed, 20 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index f909aaa09d9c..840c13706999 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -25,7 +25,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence, return timeout;
if (dma_fence_is_i915(fence))
return i915_request_wait(to_request(fence), flags, timeout);
return i915_request_wait_timeout(to_request(fence), flags, timeout);
return dma_fence_wait_timeout(fence, flags & I915_WAIT_INTERRUPTIBLE,
@@ -37,58 +37,29 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, unsigned int flags, long timeout) {
- struct dma_fence *excl;
- bool prune_fences = false;
- if (flags & I915_WAIT_ALL) {
struct dma_fence **shared;
unsigned int count, i;
int ret;
ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
timeout = i915_gem_object_wait_fence(shared[i],
flags, timeout);
if (timeout < 0)
break;
dma_fence_put(shared[i]);
}
for (; i < count; i++)
dma_fence_put(shared[i]);
kfree(shared);
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long ret = timeout ?: 1;
- dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
- dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = i915_gem_object_wait_fence(fence, flags, timeout);
if (ret <= 0)
break;
/*
* If both shared fences and an exclusive fence exist,
* then by construction the shared fences must be later
* than the exclusive fence. If we successfully wait for
* all the shared fences, we know that the exclusive fence
* must all be signaled. If all the shared fences are
* signaled, we can prune the array and recover the
* floating references on the fences/requests.
*/
prune_fences = count && timeout >= 0;
- } else {
excl = dma_resv_get_excl_unlocked(resv);
if (timeout)
timeout = ret;
}
- if (excl && timeout >= 0)
timeout = i915_gem_object_wait_fence(excl, flags, timeout);
- dma_fence_put(excl);
dma_resv_iter_end(&cursor);
/*
- Opportunistically prune the fences iff we know they have *all* been
- signaled.
*/
- if (prune_fences)
if (timeout > 0)
dma_resv_prune(resv);
- return timeout;
return ret;
}
static void fence_set_priority(struct dma_fence *fence,
@@ -196,7 +167,11 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
timeout = i915_gem_object_wait_reservation(obj->base.resv, flags, timeout);
- return timeout < 0 ? timeout : 0;
if (timeout < 0)
return timeout;
return !timeout ? -ETIME : 0;
}
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)