[Intel-gfx] [PATCH v2 5/9] drm/i915: Squash repeated awaits on the same fence
Chris Wilson
chris at chris-wilson.co.uk
Wed Apr 12 12:48:21 UTC 2017
Track the latest fence waited upon on each context, and only add a new
asynchronous wait if the new fence is more recent than the recorded
fence for that context. This requires us to filter out unordered
timelines, which are noted by DMA_FENCE_NO_CONTEXT. However, in the
absence of a universal identifier, we have to use our own
i915->mm.unordered_timeline token.
v2: Throw around the debug crutches
v3: Inline the likely case of the pre-allocation cache being full.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem_request.c | 72 ++++---
drivers/gpu/drm/i915/i915_gem_timeline.c | 217 +++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_timeline.h | 34 ++++
drivers/gpu/drm/i915/selftests/i915_gem_timeline.c | 127 ++++++++++++
.../gpu/drm/i915/selftests/i915_mock_selftests.h | 1 +
5 files changed, 424 insertions(+), 27 deletions(-)
create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 31874a38752e..6e68387aa097 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -714,9 +714,7 @@ int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
struct dma_fence *fence)
{
- struct dma_fence_array *array;
int ret;
- int i;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return 0;
@@ -728,39 +726,59 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
if (fence->context == req->fence.context)
return 0;
- if (dma_fence_is_i915(fence))
- return i915_gem_request_await_request(req, to_request(fence));
+ /* Squash repeated waits to the same timelines, picking the latest */
+ if (fence->context != req->i915->mm.unordered_timeline) {
+ if (intel_timeline_sync_get(req->timeline,
+ fence->context,
+ fence->seqno))
+ return 0;
- if (!dma_fence_is_array(fence)) {
+ ret = intel_timeline_sync_reserve(req->timeline);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (dma_fence_is_i915(fence)) {
+ ret = i915_gem_request_await_request(req, to_request(fence));
+ if (ret < 0)
+ return ret;
+ } else if (!dma_fence_is_array(fence)) {
ret = i915_sw_fence_await_dma_fence(&req->submit,
fence, I915_FENCE_TIMEOUT,
GFP_KERNEL);
- return ret < 0 ? ret : 0;
- }
-
- /* Note that if the fence-array was created in signal-on-any mode,
- * we should *not* decompose it into its individual fences. However,
- * we don't currently store which mode the fence-array is operating
- * in. Fortunately, the only user of signal-on-any is private to
- * amdgpu and we should not see any incoming fence-array from
- * sync-file being in signal-on-any mode.
- */
-
- array = to_dma_fence_array(fence);
- for (i = 0; i < array->num_fences; i++) {
- struct dma_fence *child = array->fences[i];
-
- if (dma_fence_is_i915(child))
- ret = i915_gem_request_await_request(req,
- to_request(child));
- else
- ret = i915_sw_fence_await_dma_fence(&req->submit,
- child, I915_FENCE_TIMEOUT,
- GFP_KERNEL);
if (ret < 0)
return ret;
+ } else {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ /* Note that if the fence-array was created in signal-on-any mode,
+ * we should *not* decompose it into its individual fences. However,
+ * we don't currently store which mode the fence-array is operating
+ * in. Fortunately, the only user of signal-on-any is private to
+ * amdgpu and we should not see any incoming fence-array from
+ * sync-file being in signal-on-any mode.
+ */
+
+ for (i = 0; i < array->num_fences; i++) {
+ struct dma_fence *child = array->fences[i];
+
+ if (dma_fence_is_i915(child))
+ ret = i915_gem_request_await_request(req,
+ to_request(child));
+ else
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ child, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
}
+ if (fence->context != req->i915->mm.unordered_timeline)
+ intel_timeline_sync_set(req->timeline,
+ fence->context, fence->seqno);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index b596ca7ee058..c25edfde4833 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -24,6 +24,217 @@
#include "i915_drv.h"
+#define SHIFT ilog2(NSEQMAP)
+#define MASK (NSEQMAP - 1)
+
+static void seqmap_free_layers(struct seqmap_layer *p)
+{
+ unsigned int i;
+
+ if (p->height) {
+ for (; (i = ffs(p->bitmap)); p->bitmap &= ~0u << i)
+ seqmap_free_layers(p->slot[i - 1]);
+ }
+
+ kfree(p);
+}
+
+static void seqmap_free(struct seqmap *seqmap)
+{
+ if (seqmap->top)
+ seqmap_free_layers(seqmap->top);
+
+ while (seqmap->freed) {
+ struct seqmap_layer *p;
+
+ p = ptr_mask_bits(seqmap->freed, SEQMAP_COUNT_BITS);
+ seqmap->freed = p->parent;
+ kfree(p);
+ }
+}
+
+__malloc static struct seqmap_layer *
+seqmap_alloc_layer(struct seqmap *shared)
+{
+ struct seqmap_layer *p;
+
+ GEM_BUG_ON(!shared->freed);
+
+ p = ptr_mask_bits(shared->freed, SEQMAP_COUNT_BITS);
+ shared->freed = p->parent;
+
+ return p;
+}
+
+static int layer_idx(const struct seqmap_layer *p, u64 id)
+{
+ return (id >> p->height) & MASK;
+}
+
+bool intel_timeline_sync_get(struct intel_timeline *tl, u64 id, u32 seqno)
+{
+ struct seqmap *shared = &tl->sync;
+ struct seqmap_layer *p;
+ unsigned int idx;
+
+ p = shared->hint;
+ if (!p)
+ return false;
+
+ if ((id >> SHIFT) == p->prefix)
+ goto found;
+
+ p = shared->top;
+ do {
+ if ((id >> p->height >> SHIFT) != p->prefix)
+ return false;
+
+ if (!p->height)
+ break;
+
+ p = p->slot[layer_idx(p, id)];
+ if (!p)
+ return false;
+ } while (1);
+
+ shared->hint = p;
+found:
+ idx = id & MASK;
+ if (!(p->bitmap & BIT(idx)))
+ return false;
+
+ return i915_seqno_passed((uintptr_t)p->slot[idx], seqno);
+}
+
+void intel_timeline_sync_set(struct intel_timeline *tl, u64 id, u32 seqno)
+{
+ struct seqmap *shared = &tl->sync;
+ struct seqmap_layer *p, *cur;
+ unsigned int idx;
+
+ /* We expect to be called in sequence following a _get(id), which
+ * should have preloaded the ->hint for us.
+ */
+ p = shared->hint;
+ if (likely(p && (id >> SHIFT) == p->prefix))
+ goto found_layer;
+
+ if (!p) {
+ GEM_BUG_ON(shared->top);
+ cur = seqmap_alloc_layer(shared);
+ cur->parent = NULL;
+ shared->top = cur;
+ goto new_layer;
+ }
+
+ /* No shortcut, we have to descend the tree to find the right layer
+ * containing this fence.
+ *
+ * Each layer in the tree holds 16 (NSEQMAP) pointers, either fences
+ * or lower layers. Leaf nodes (height = 0) contain the fences, all
+ * other nodes (height > 0) are internal layers that point to a lower
+ * node. Each internal layer has at least 2 descendants.
+ *
+ * Starting at the top, we check whether the current prefix matches. If
+ * it doesn't, we have gone past our layer and need to insert a join
+ * into the tree, and a new leaf node as a descendant as well as the
+ * original layer.
+ *
+ * The matching prefix means we are still following the right branch
+ * of the tree. If it has height 0, we have found our leaf and just
+ * need to replace the fence slot with ourselves. If the height is
+ * not zero, our slot contains the next layer in the tree (unless
+ * it is empty, in which case we can add ourselves as a new leaf).
+ * As we descend the tree the prefix grows (and height decreases).
+ */
+ p = shared->top;
+ do {
+ if ((id >> p->height >> SHIFT) != p->prefix) {
+ /* insert a join above the current layer */
+ cur = seqmap_alloc_layer(shared);
+ cur->height = ALIGN(fls64((id >> p->height >> SHIFT) ^ p->prefix),
+ SHIFT) + p->height;
+ cur->prefix = id >> cur->height >> SHIFT;
+
+ if (p->parent)
+ p->parent->slot[layer_idx(p->parent, id)] = cur;
+ else
+ shared->top = cur;
+ cur->parent = p->parent;
+
+ idx = p->prefix >> (cur->height - p->height - SHIFT) & MASK;
+ cur->slot[idx] = p;
+ cur->bitmap |= BIT(idx);
+ p->parent = cur;
+ } else if (!p->height) {
+ /* matching base layer */
+ shared->hint = p;
+ goto found_layer;
+ } else {
+ /* descend into the next layer */
+ idx = layer_idx(p, id);
+ cur = p->slot[idx];
+ if (unlikely(!cur)) {
+ cur = seqmap_alloc_layer(shared);
+ p->slot[idx] = cur;
+ p->bitmap |= BIT(idx);
+ cur->parent = p;
+ goto new_layer;
+ }
+ }
+
+ p = cur;
+ } while (1);
+
+found_layer:
+ GEM_BUG_ON(p->height);
+ GEM_BUG_ON(p->prefix != id >> SHIFT);
+ idx = id & MASK;
+ p->slot[idx] = (void *)(uintptr_t)seqno;
+ p->bitmap |= BIT(idx);
+ GEM_BUG_ON(shared->hint != p);
+ return;
+
+new_layer:
+ GEM_BUG_ON(cur->height);
+ cur->prefix = id >> SHIFT;
+ idx = id & MASK;
+ cur->slot[idx] = (void *)(uintptr_t)seqno;
+ cur->bitmap = BIT(idx);
+ shared->hint = cur;
+ return;
+}
+
+int __intel_timeline_sync_reserve(struct intel_timeline *tl)
+{
+ struct seqmap *shared = &tl->sync;
+ int count;
+
+ might_sleep();
+
+ /* To guarantee being able to replace a fence in the radixtree,
+ * we need at most 2 layers: one to create a join in the tree,
+ * and one to contain the fence. Typically we expect to reuse
+ * a layer and so avoid any insertions.
+ *
+ * We use the low bits of the freed list to track its length
+ * since we only need a couple of bits.
+ */
+ count = ptr_unmask_bits(shared->freed, SEQMAP_COUNT_BITS);
+ while (count++ < 2) {
+ struct seqmap_layer *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ p->parent = shared->freed;
+ shared->freed = ptr_pack_bits(p, count, SEQMAP_COUNT_BITS);
+ }
+
+ return 0;
+}
+
static int __i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *timeline,
const char *name,
@@ -91,8 +302,14 @@ void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
struct intel_timeline *tl = &timeline->engine[i];
GEM_BUG_ON(!list_empty(&tl->requests));
+
+ seqmap_free(&tl->sync);
}
list_del(&timeline->link);
kfree(timeline->name);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_timeline.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 6c53e14cab2a..4cb597401803 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -26,11 +26,30 @@
#define I915_GEM_TIMELINE_H
#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include "i915_utils.h"
#include "i915_gem_request.h"
struct i915_gem_timeline;
+#define NSEQMAP 16
+
+struct seqmap_layer {
+ u64 prefix;
+ unsigned int height;
+ unsigned int bitmap;
+ struct seqmap_layer *parent;
+ void *slot[NSEQMAP];
+};
+
+struct seqmap {
+ struct seqmap_layer *hint;
+ struct seqmap_layer *top;
+ struct seqmap_layer *freed;
+#define SEQMAP_COUNT_BITS 2
+};
+
struct intel_timeline {
u64 fence_context;
u32 seqno;
@@ -55,6 +74,8 @@ struct intel_timeline {
* struct_mutex.
*/
struct i915_gem_active last_request;
+
+ struct seqmap sync;
u32 sync_seqno[I915_NUM_ENGINES];
struct i915_gem_timeline *common;
@@ -75,4 +96,17 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+bool intel_timeline_sync_get(struct intel_timeline *tl, u64 id, u32 seqno);
+
+int __intel_timeline_sync_reserve(struct intel_timeline *tl);
+static inline int intel_timeline_sync_reserve(struct intel_timeline *tl)
+{
+ if (likely(ptr_unmask_bits(tl->sync.freed, SEQMAP_COUNT_BITS) == 2))
+ return 0;
+
+ return __intel_timeline_sync_reserve(tl);
+}
+
+void intel_timeline_sync_set(struct intel_timeline *tl, u64 id, u32 seqno);
+
#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
new file mode 100644
index 000000000000..15e2b2e63081
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "mock_gem_device.h"
+
+static int igt_seqmap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const struct {
+ const char *name;
+ u32 seqno;
+ bool expected;
+ bool set;
+ } pass[] = {
+ { "unset", 0, false, false },
+ { "new", 0, false, true },
+ { "0a", 0, true, true },
+ { "1a", 1, false, true },
+ { "1b", 1, true, true },
+ { "0b", 0, true, false },
+ { "2a", 2, false, true },
+ { "4", 4, false, true },
+ { "INT_MAX", INT_MAX, false, true },
+ { "INT_MAX-1", INT_MAX-1, true, false },
+ { "INT_MAX+1", (u32)INT_MAX+1, false, true },
+ { "INT_MAX", INT_MAX, true, false },
+ { "UINT_MAX", UINT_MAX, false, true },
+ { "wrap", 0, false, true },
+ { "unwrap", UINT_MAX, true, false },
+ {},
+ }, *p;
+ struct intel_timeline *tl;
+ int order, offset;
+ int ret;
+
+ tl = &i915->gt.global_timeline.engine[RCS];
+ for (p = pass; p->name; p++) {
+ for (order = 1; order < 64; order++) {
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ if (intel_timeline_sync_get(tl,
+ ctx,
+ p->seqno) != p->expected) {
+ pr_err("1: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
+ p->name, ctx, p->seqno, yesno(p->expected));
+ return -EINVAL;
+ }
+
+ if (p->set) {
+ ret = intel_timeline_sync_reserve(tl);
+ if (ret)
+ return ret;
+
+ intel_timeline_sync_set(tl, ctx, p->seqno);
+ }
+ }
+ }
+ }
+
+ tl = &i915->gt.global_timeline.engine[BCS];
+ for (order = 1; order < 64; order++) {
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ for (p = pass; p->name; p++) {
+ if (intel_timeline_sync_get(tl,
+ ctx,
+ p->seqno) != p->expected) {
+ pr_err("2: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
+ p->name, ctx, p->seqno, yesno(p->expected));
+ return -EINVAL;
+ }
+
+ if (p->set) {
+ ret = intel_timeline_sync_reserve(tl);
+ if (ret)
+ return ret;
+
+ intel_timeline_sync_set(tl, ctx, p->seqno);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+int i915_gem_timeline_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_seqmap),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+ drm_dev_unref(&i915->drm);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index be9a9ebf5692..8d0f50c25df8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -12,6 +12,7 @@ selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
selftest(scatterlist, scatterlist_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
+selftest(timelines, i915_gem_timeline_mock_selftests)
selftest(requests, i915_gem_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
--
2.11.0
More information about the Intel-gfx
mailing list