[Intel-gfx] [PATCH 07/27] drm/i915: Squash repeated awaits on the same fence
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Mon Apr 24 13:03:25 UTC 2017
On 19/04/2017 10:41, Chris Wilson wrote:
> Track the latest fence waited upon on each context, and only add a new
> asynchronous wait if the new fence is more recent than the recorded
> fence for that context. This requires us to filter out unordered
> timelines, which are noted by DMA_FENCE_NO_CONTEXT. However, in the
> absence of a universal identifier, we have to use our own
> i915->mm.unordered_timeline token.
(._.), a bit later... @_@!
What does this fix and is the complexity worth it?
Regards,
Tvrtko
>
> v2: Throw around the debug crutches
> v3: Inline the likely case of the pre-allocation cache being full.
> v4: Drop the pre-allocation support, we can lose the most recent fence
> in case of allocation failure -- it just means we may emit more awaits
> than strictly necessary but will not break.
> v5: Trim allocation size for leaf nodes, they only need an array of u32
> not pointers.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> ---
> drivers/gpu/drm/i915/i915_gem_request.c | 67 +++---
> drivers/gpu/drm/i915/i915_gem_timeline.c | 260 +++++++++++++++++++++
> drivers/gpu/drm/i915/i915_gem_timeline.h | 14 ++
> drivers/gpu/drm/i915/selftests/i915_gem_timeline.c | 123 ++++++++++
> .../gpu/drm/i915/selftests/i915_mock_selftests.h | 1 +
> 5 files changed, 438 insertions(+), 27 deletions(-)
> create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 97c07986b7c1..fb6c31ba3ef9 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -730,9 +730,7 @@ int
> i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
> struct dma_fence *fence)
> {
> - struct dma_fence_array *array;
> int ret;
> - int i;
>
> if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
> return 0;
> @@ -744,39 +742,54 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
> if (fence->context == req->fence.context)
> return 0;
>
> - if (dma_fence_is_i915(fence))
> - return i915_gem_request_await_request(req, to_request(fence));
> + /* Squash repeated waits to the same timelines, picking the latest */
> + if (fence->context != req->i915->mm.unordered_timeline &&
> + intel_timeline_sync_get(req->timeline,
> + fence->context, fence->seqno))
> + return 0;
>
> - if (!dma_fence_is_array(fence)) {
> + if (dma_fence_is_i915(fence)) {
> + ret = i915_gem_request_await_request(req, to_request(fence));
> + if (ret < 0)
> + return ret;
> + } else if (!dma_fence_is_array(fence)) {
> ret = i915_sw_fence_await_dma_fence(&req->submit,
> fence, I915_FENCE_TIMEOUT,
> GFP_KERNEL);
> - return ret < 0 ? ret : 0;
> - }
> -
> - /* Note that if the fence-array was created in signal-on-any mode,
> - * we should *not* decompose it into its individual fences. However,
> - * we don't currently store which mode the fence-array is operating
> - * in. Fortunately, the only user of signal-on-any is private to
> - * amdgpu and we should not see any incoming fence-array from
> - * sync-file being in signal-on-any mode.
> - */
> -
> - array = to_dma_fence_array(fence);
> - for (i = 0; i < array->num_fences; i++) {
> - struct dma_fence *child = array->fences[i];
> -
> - if (dma_fence_is_i915(child))
> - ret = i915_gem_request_await_request(req,
> - to_request(child));
> - else
> - ret = i915_sw_fence_await_dma_fence(&req->submit,
> - child, I915_FENCE_TIMEOUT,
> - GFP_KERNEL);
> if (ret < 0)
> return ret;
> + } else {
> + struct dma_fence_array *array = to_dma_fence_array(fence);
> + int i;
> +
> + /* Note that if the fence-array was created in signal-on-any
> + * mode, we should *not* decompose it into its individual
> + * fences. However, we don't currently store which mode the
> + * fence-array is operating in. Fortunately, the only user of
> + * signal-on-any is private to amdgpu and we should not see any
> + * incoming fence-array from sync-file being in signal-on-any
> + * mode.
> + */
> +
> + for (i = 0; i < array->num_fences; i++) {
> + struct dma_fence *child = array->fences[i];
> +
> + if (dma_fence_is_i915(child))
> + ret = i915_gem_request_await_request(req,
> + to_request(child));
> + else
> + ret = i915_sw_fence_await_dma_fence(&req->submit,
> + child, I915_FENCE_TIMEOUT,
> + GFP_KERNEL);
> + if (ret < 0)
> + return ret;
> + }
> }
>
> + if (fence->context != req->i915->mm.unordered_timeline)
> + intel_timeline_sync_set(req->timeline,
> + fence->context, fence->seqno);
> +
> return 0;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
> index b596ca7ee058..f2b734dda895 100644
> --- a/drivers/gpu/drm/i915/i915_gem_timeline.c
> +++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
> @@ -24,6 +24,254 @@
>
> #include "i915_drv.h"
>
> +#define NSYNC 16
> +#define SHIFT ilog2(NSYNC)
> +#define MASK (NSYNC - 1)
> +
> +/* struct intel_timeline_sync is a layer of a radixtree that maps a u64 fence
> + * context id to the last u32 fence seqno waited upon from that context.
> + * Unlike lib/radixtree it uses a parent pointer that allows traversal back to
> + * the root. This allows us to access the whole tree via a single pointer
> + * to the most recently used layer. We expect fence contexts to be dense
> + * and most reuse to be on the same i915_gem_context but on neighbouring
> + * engines (i.e. on adjacent contexts) and reuse the same leaf, a very
> + * effective lookup cache. If the new lookup is not on the same leaf, we
> + * expect it to be on the neighbouring branch.
> + *
> + * A leaf holds an array of u32 seqno, and has height 0. The bitmap field
> + * allows us to store whether a particular seqno is valid (i.e. allows us
> + * to distinguish unset from 0).
> + *
> + * A branch holds an array of layer pointers, and has height > 0, and always
> + * has at least 2 layers (either branches or leaves) below it.
> + *
> + */
> +struct intel_timeline_sync {
> + u64 prefix;
> + unsigned int height;
> + unsigned int bitmap;
> + struct intel_timeline_sync *parent;
> + /* union {
> + * u32 seqno;
> + * struct intel_timeline_sync *child;
> + * } slot[NSYNC];
> + */
> +};
> +
> +static inline u32 *__sync_seqno(struct intel_timeline_sync *p)
> +{
> + GEM_BUG_ON(p->height);
> + return (u32 *)(p + 1);
> +}
> +
> +static inline struct intel_timeline_sync **
> +__sync_child(struct intel_timeline_sync *p)
> +{
> + GEM_BUG_ON(!p->height);
> + return (struct intel_timeline_sync **)(p + 1);
> +}
> +
> +static inline unsigned int
> +__sync_idx(const struct intel_timeline_sync *p, u64 id)
> +{
> + return (id >> p->height) & MASK;
> +}
> +
> +static void __sync_free(struct intel_timeline_sync *p)
> +{
> + if (p->height) {
> + unsigned int i;
> +
> + while ((i = ffs(p->bitmap))) {
> + p->bitmap &= ~0u << i;
> + __sync_free(__sync_child(p)[i - 1]);
> + }
> + }
> +
> + kfree(p);
> +}
> +
> +static void sync_free(struct intel_timeline_sync *sync)
> +{
> + if (!sync)
> + return;
> +
> + while (sync->parent)
> + sync = sync->parent;
> +
> + __sync_free(sync);
> +}
> +
> +bool intel_timeline_sync_get(struct intel_timeline *tl, u64 id, u32 seqno)
> +{
> + struct intel_timeline_sync *p;
> + unsigned int idx;
> +
> + p = tl->sync;
> + if (!p)
> + return false;
> +
> + if (likely((id >> SHIFT) == p->prefix))
> + goto found;
> +
> + /* First climb the tree back to a parent branch */
> + do {
> + p = p->parent;
> + if (!p)
> + return false;
> +
> + if ((id >> p->height >> SHIFT) == p->prefix)
> + break;
> + } while (1);
> +
> + /* And then descend again until we find our leaf */
> + do {
> + if (!p->height)
> + break;
> +
> + p = __sync_child(p)[__sync_idx(p, id)];
> + if (!p)
> + return false;
> +
> + if ((id >> p->height >> SHIFT) != p->prefix)
> + return false;
> + } while (1);
> +
> + tl->sync = p;
> +found:
> + idx = id & MASK;
> + if (!(p->bitmap & BIT(idx)))
> + return false;
> +
> + return i915_seqno_passed(__sync_seqno(p)[idx], seqno);
> +}
> +
> +static noinline int
> +__intel_timeline_sync_set(struct intel_timeline *tl, u64 id, u32 seqno)
> +{
> + struct intel_timeline_sync *p = tl->sync;
> + unsigned int idx;
> +
> + if (!p) {
> + p = kzalloc(sizeof(*p) + NSYNC * sizeof(seqno), GFP_KERNEL);
> + if (unlikely(!p))
> + return -ENOMEM;
> +
> + p->prefix = id >> SHIFT;
> + goto found;
> + }
> +
> + /* Climb back up the tree until we find a common prefix */
> + do {
> + if (!p->parent)
> + break;
> +
> + p = p->parent;
> +
> + if ((id >> p->height >> SHIFT) == p->prefix)
> + break;
> + } while (1);
> +
> + /* No shortcut, we have to descend the tree to find the right layer
> + * containing this fence.
> + *
> + * Each layer in the tree holds 16 (NSYNC) pointers, either fences
> + * or lower layers. Leaf nodes (height = 0) contain the fences, all
> + * other nodes (height > 0) are internal layers that point to a lower
> + * node. Each internal layer has at least 2 descendents.
> + *
> + * Starting at the top, we check whether the current prefix matches. If
> + * it doesn't, we have gone past our layer and need to insert a join
> + * into the tree, and a new leaf node as a descendant as well as the
> + * original layer.
> + *
> + * The matching prefix means we are still following the right branch
> + * of the tree. If it has height 0, we have found our leaf and just
> + * need to replace the fence slot with ourselves. If the height is
> + * not zero, our slot contains the next layer in the tree (unless
> + * it is empty, in which case we can add ourselves as a new leaf).
> + * As descend the tree the prefix grows (and height decreases).
> + */
> + do {
> + struct intel_timeline_sync *next;
> +
> + if ((id >> p->height >> SHIFT) != p->prefix) {
> + /* insert a join above the current layer */
> + next = kzalloc(sizeof(*next) + NSYNC * sizeof(next),
> + GFP_KERNEL);
> + if (unlikely(!next))
> + return -ENOMEM;
> +
> + next->height = ALIGN(fls64((id >> p->height >> SHIFT) ^ p->prefix),
> + SHIFT) + p->height;
> + next->prefix = id >> next->height >> SHIFT;
> +
> + if (p->parent)
> + __sync_child(p->parent)[__sync_idx(p->parent, id)] = next;
> + next->parent = p->parent;
> +
> + idx = p->prefix >> (next->height - p->height - SHIFT) & MASK;
> + __sync_child(next)[idx] = p;
> + next->bitmap |= BIT(idx);
> + p->parent = next;
> +
> + /* ascend to the join */
> + p = next;
> + } else {
> + if (!p->height)
> + break;
> + }
> +
> + /* descend into the next layer */
> + GEM_BUG_ON(!p->height);
> + idx = __sync_idx(p, id);
> + next = __sync_child(p)[idx];
> + if (unlikely(!next)) {
> + next = kzalloc(sizeof(*next) + NSYNC * sizeof(seqno),
> + GFP_KERNEL);
> + if (unlikely(!next))
> + return -ENOMEM;
> +
> + __sync_child(p)[idx] = next;
> + p->bitmap |= BIT(idx);
> + next->parent = p;
> + next->prefix = id >> SHIFT;
> +
> + p = next;
> + break;
> + }
> +
> + p = next;
> + } while (1);
> +
> +found:
> + GEM_BUG_ON(p->height);
> + GEM_BUG_ON(p->prefix != id >> SHIFT);
> + tl->sync = p;
> + idx = id & MASK;
> + __sync_seqno(p)[idx] = seqno;
> + p->bitmap |= BIT(idx);
> + return 0;
> +}
> +
> +int intel_timeline_sync_set(struct intel_timeline *tl, u64 id, u32 seqno)
> +{
> + struct intel_timeline_sync *p = tl->sync;
> +
> + /* We expect to be called in sequence following a _get(id), which
> + * should have preloaded the tl->sync hint for us.
> + */
> + if (likely(p && (id >> SHIFT) == p->prefix)) {
> + unsigned int idx = id & MASK;
> +
> + __sync_seqno(p)[idx] = seqno;
> + p->bitmap |= BIT(idx);
> + return 0;
> + }
> +
> + return __intel_timeline_sync_set(tl, id, seqno);
> +}
> +
> static int __i915_gem_timeline_init(struct drm_i915_private *i915,
> struct i915_gem_timeline *timeline,
> const char *name,
> @@ -35,6 +283,12 @@ static int __i915_gem_timeline_init(struct drm_i915_private *i915,
>
> lockdep_assert_held(&i915->drm.struct_mutex);
>
> + /* Ideally we want a set of engines on a single leaf as we expect
> + * to mostly be tracking synchronisation between engines.
> + */
> + BUILD_BUG_ON(NSYNC < I915_NUM_ENGINES);
> + BUILD_BUG_ON(NSYNC > BITS_PER_BYTE * sizeof(timeline->engine[0].sync->bitmap));
> +
> timeline->i915 = i915;
> timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
> if (!timeline->name)
> @@ -91,8 +345,14 @@ void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
> struct intel_timeline *tl = &timeline->engine[i];
>
> GEM_BUG_ON(!list_empty(&tl->requests));
> +
> + sync_free(tl->sync);
> }
>
> list_del(&timeline->link);
> kfree(timeline->name);
> }
> +
> +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> +#include "selftests/i915_gem_timeline.c"
> +#endif
> diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
> index 6c53e14cab2a..c33dee0025ee 100644
> --- a/drivers/gpu/drm/i915/i915_gem_timeline.h
> +++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
> @@ -26,10 +26,13 @@
> #define I915_GEM_TIMELINE_H
>
> #include <linux/list.h>
> +#include <linux/radix-tree.h>
>
> +#include "i915_utils.h"
> #include "i915_gem_request.h"
>
> struct i915_gem_timeline;
> +struct intel_timeline_sync;
>
> struct intel_timeline {
> u64 fence_context;
> @@ -55,6 +58,14 @@ struct intel_timeline {
> * struct_mutex.
> */
> struct i915_gem_active last_request;
> +
> + /* We track the most recent seqno that we wait on in every context so
> + * that we only have to emit a new await and dependency on a more
> + * recent sync point. As the contexts may execute out-of-order, we
> + * have to track each individually and cannot rely on an absolute
> + * global_seqno.
> + */
> + struct intel_timeline_sync *sync;
> u32 sync_seqno[I915_NUM_ENGINES];
>
> struct i915_gem_timeline *common;
> @@ -75,4 +86,7 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
> int i915_gem_timeline_init__global(struct drm_i915_private *i915);
> void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
>
> +bool intel_timeline_sync_get(struct intel_timeline *tl, u64 id, u32 seqno);
> +int intel_timeline_sync_set(struct intel_timeline *tl, u64 id, u32 seqno);
> +
> #endif
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
> new file mode 100644
> index 000000000000..c0bb8ecac93b
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
> @@ -0,0 +1,123 @@
> +/*
> + * Copyright © 2017 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + */
> +
> +#include "../i915_selftest.h"
> +#include "mock_gem_device.h"
> +
> +static int igt_seqmap(void *arg)
> +{
> + struct drm_i915_private *i915 = arg;
> + const struct {
> + const char *name;
> + u32 seqno;
> + bool expected;
> + bool set;
> + } pass[] = {
> + { "unset", 0, false, false },
> + { "new", 0, false, true },
> + { "0a", 0, true, true },
> + { "1a", 1, false, true },
> + { "1b", 1, true, true },
> + { "0b", 0, true, false },
> + { "2a", 2, false, true },
> + { "4", 4, false, true },
> + { "INT_MAX", INT_MAX, false, true },
> + { "INT_MAX-1", INT_MAX-1, true, false },
> + { "INT_MAX+1", (u32)INT_MAX+1, false, true },
> + { "INT_MAX", INT_MAX, true, false },
> + { "UINT_MAX", UINT_MAX, false, true },
> + { "wrap", 0, false, true },
> + { "unwrap", UINT_MAX, true, false },
> + {},
> + }, *p;
> + struct intel_timeline *tl;
> + int order, offset;
> + int ret;
> +
> + tl = &i915->gt.global_timeline.engine[RCS];
> + for (p = pass; p->name; p++) {
> + for (order = 1; order < 64; order++) {
> + for (offset = -1; offset <= (order > 1); offset++) {
> + u64 ctx = BIT_ULL(order) + offset;
> +
> + if (intel_timeline_sync_get(tl,
> + ctx,
> + p->seqno) != p->expected) {
> + pr_err("1: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
> + p->name, ctx, p->seqno, yesno(p->expected));
> + return -EINVAL;
> + }
> +
> + if (p->set) {
> + ret = intel_timeline_sync_set(tl, ctx, p->seqno);
> + if (ret)
> + return ret;
> + }
> + }
> + }
> + }
> +
> + tl = &i915->gt.global_timeline.engine[BCS];
> + for (order = 1; order < 64; order++) {
> + for (offset = -1; offset <= (order > 1); offset++) {
> + u64 ctx = BIT_ULL(order) + offset;
> +
> + for (p = pass; p->name; p++) {
> + if (intel_timeline_sync_get(tl,
> + ctx,
> + p->seqno) != p->expected) {
> + pr_err("2: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
> + p->name, ctx, p->seqno, yesno(p->expected));
> + return -EINVAL;
> + }
> +
> + if (p->set) {
> + ret = intel_timeline_sync_set(tl, ctx, p->seqno);
> + if (ret)
> + return ret;
> + }
> + }
> + }
> + }
> +
> + return 0;
> +}
> +
> +int i915_gem_timeline_mock_selftests(void)
> +{
> + static const struct i915_subtest tests[] = {
> + SUBTEST(igt_seqmap),
> + };
> + struct drm_i915_private *i915;
> + int err;
> +
> + i915 = mock_gem_device();
> + if (!i915)
> + return -ENOMEM;
> +
> + err = i915_subtests(tests, i915);
> + drm_dev_unref(&i915->drm);
> +
> + return err;
> +}
> diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
> index be9a9ebf5692..8d0f50c25df8 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
> @@ -12,6 +12,7 @@ selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
> selftest(scatterlist, scatterlist_mock_selftests)
> selftest(uncore, intel_uncore_mock_selftests)
> selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
> +selftest(timelines, i915_gem_timeline_mock_selftests)
> selftest(requests, i915_gem_request_mock_selftests)
> selftest(objects, i915_gem_object_mock_selftests)
> selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
>
More information about the Intel-gfx
mailing list