[PATCH 90/91] drm/i915: Eliminate unnecessary VMA calls for multi-BB submission
Matthew Brost
matthew.brost at intel.com
Tue Jul 20 05:35:02 UTC 2021
Certain VMA operations in the execbuf IOCTL only need to be done for the
first or last BB of a multi-BB submission: VMA lookup, validation, and
relocation only for the first BB, and eb_release_vmas() plus the ww
context teardown only after the last BB. Skipping the redundant calls
for the intermediate BBs saves CPU / GPU cycles.
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
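Note for reviewers: the core of the change is that i915_gem_do_execbuffer()
now takes a shared i915_gem_ww_ctx from the caller and gates the per-BB
setup/teardown on whether the BB is the first or last of the submission.
The snippet below is a minimal, standalone C sketch of that control flow
only; the struct and helper names (fake_eb, lookup_and_relocate, etc.) are
illustrative and are not the driver's actual API, and it assumes first/last
are derived from the batch number as in this series.

/*
 * Minimal sketch (not the kernel code) of the gating this patch creates:
 * setup runs once for the first BB, teardown once after the last BB,
 * while every BB is still submitted.  All names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_eb {
	int batch_index;
	int num_batches;
};

static void lookup_and_relocate(struct fake_eb *eb) { puts("lookup + relocate"); }
static void release_vmas(struct fake_eb *eb)        { puts("release vmas"); }
static void submit(struct fake_eb *eb)
{
	printf("submit BB %d\n", eb->batch_index);
}

static void do_execbuffer(struct fake_eb *eb, bool first, bool last)
{
	if (first)		/* lookup/validate/relocate only once */
		lookup_and_relocate(eb);

	submit(eb);		/* every BB is still submitted */

	if (last)		/* unpin/unlock only after the last BB */
		release_vmas(eb);
}

int main(void)
{
	struct fake_eb eb = { .num_batches = 3 };

	for (int i = 0; i < eb.num_batches; i++) {
		eb.batch_index = i;
		do_execbuffer(&eb, i == 0, i == eb.num_batches - 1);
	}
	return 0;
}
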
.../gpu/drm/i915/gem/i915_gem_execbuffer.c | 149 ++++++++++--------
.../i915/gem/selftests/i915_gem_execbuffer.c | 14 +-
2 files changed, 89 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 2ef43cf0f1e9..ba29426981a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -272,7 +272,7 @@ struct i915_execbuffer {
/** list of vma that have execobj.relocation_count */
struct list_head relocs;
- struct i915_gem_ww_ctx ww;
+ struct i915_gem_ww_ctx *ww;
/**
* Track the most recently used object for relocations, as we
@@ -450,7 +450,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags |= PIN_GLOBAL;
/* Attempt to reuse the current location if available */
- err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
+ err = i915_vma_pin_ww(vma, eb->ww, 0, 0, pin_flags);
if (err == -EDEADLK)
return err;
@@ -459,11 +459,11 @@ eb_pin_vma(struct i915_execbuffer *eb,
return err;
/* Failing that pick any _free_ space if suitable */
- err = i915_vma_pin_ww(vma, &eb->ww,
- entry->pad_to_size,
- entry->alignment,
- eb_pin_flags(entry, ev->flags) |
- PIN_USER | PIN_NOEVICT);
+ err = i915_vma_pin_ww(vma, eb->ww,
+ entry->pad_to_size,
+ entry->alignment,
+ eb_pin_flags(entry, ev->flags) |
+ PIN_USER | PIN_NOEVICT);
if (unlikely(err))
return err;
}
@@ -515,20 +515,11 @@ static bool platform_has_relocs_enabled(const struct i915_execbuffer *eb)
}
static inline bool
-is_other_bb(struct i915_execbuffer *eb, unsigned buffer_idx)
+is_batch_buffer(struct i915_execbuffer *eb, unsigned buffer_idx)
{
- if (eb->num_batches > 1 && buffer_idx != eb->batch_index) {
- if (eb->args->flags & I915_EXEC_BATCH_FIRST) {
- if (buffer_idx < eb->num_batches)
- return true;
- } else {
- if (buffer_idx >=
- eb->args->buffer_count - eb->num_batches)
- return true;
- }
- }
-
- return false;
+ return eb->args->flags & I915_EXEC_BATCH_FIRST ?
+ buffer_idx <= eb->num_batches :
+ buffer_idx >= eb->args->buffer_count - eb->num_batches;
}
static int
@@ -582,8 +573,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
static void
eb_add_vma(struct i915_execbuffer *eb,
- unsigned int buffer_idx, unsigned batch_idx,
- struct i915_vma *vma)
+ unsigned int buffer_idx, struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[buffer_idx];
struct eb_vma *ev = &eb->vma[buffer_idx];
@@ -611,14 +601,14 @@ eb_add_vma(struct i915_execbuffer *eb,
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
- if (buffer_idx == batch_idx || is_other_bb(eb, buffer_idx)) {
+ if (is_batch_buffer(eb, buffer_idx)) {
if (entry->relocation_count &&
!(ev->flags & EXEC_OBJECT_PINNED))
ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- if (buffer_idx == batch_idx)
+ if (buffer_idx == eb->batch_index)
eb->batch = ev;
}
}
@@ -655,7 +645,7 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
return err;
}
- err = i915_vma_pin_ww(vma, &eb->ww,
+ err = i915_vma_pin_ww(vma, eb->ww,
entry->pad_to_size, entry->alignment,
eb_pin_flags(entry, ev->flags) | pin_flags);
if (err)
@@ -893,7 +883,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err;
}
- eb_add_vma(eb, i, eb->batch_index, vma);
+ eb_add_vma(eb, i, vma);
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
@@ -952,7 +942,7 @@ static int eb_lock_vmas(struct i915_execbuffer *eb)
struct eb_vma *ev = &eb->vma[i];
struct i915_vma *vma = ev->vma;
- err = i915_gem_object_lock(vma->obj, &eb->ww);
+ err = i915_gem_object_lock(vma->obj, eb->ww);
if (err)
return err;
}
@@ -1032,12 +1022,13 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
}
}
-static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
+static void eb_release_vmas(struct i915_execbuffer *eb, bool final,
+ bool unreserve)
{
const unsigned int count = eb->buffer_count;
unsigned int i;
- for (i = 0; i < count; i++) {
+ for (i = 0; unreserve && i < count; i++) {
struct eb_vma *ev = &eb->vma[i];
struct i915_vma *vma = ev->vma;
@@ -1249,7 +1240,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (err)
return ERR_PTR(err);
- vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
+ vma = i915_gem_object_ggtt_pin_ww(obj, eb->ww, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
@@ -1373,7 +1364,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
}
eb->reloc_pool = NULL;
- err = i915_gem_object_lock(pool->obj, &eb->ww);
+ err = i915_gem_object_lock(pool->obj, eb->ww);
if (err)
goto err_pool;
@@ -1392,7 +1383,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unmap;
}
- err = i915_vma_pin_ww(batch, &eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK);
+ err = i915_vma_pin_ww(batch, eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK);
if (err)
goto err_unmap;
@@ -1414,7 +1405,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
eb->reloc_context = ce;
}
- err = intel_context_pin_ww(ce, &eb->ww);
+ err = intel_context_pin_ww(ce, eb->ww);
if (err)
goto err_unpin;
@@ -2025,8 +2016,8 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
}
/* We may process another execbuffer during the unlock... */
- eb_release_vmas(eb, false);
- i915_gem_ww_ctx_fini(&eb->ww);
+ eb_release_vmas(eb, false, true);
+ i915_gem_ww_ctx_fini(eb->ww);
if (rq) {
/* nonblocking is always false */
@@ -2070,7 +2061,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
err = eb_reinit_userptr(eb);
err_relock:
- i915_gem_ww_ctx_init(&eb->ww, true);
+ i915_gem_ww_ctx_init(eb->ww, true);
if (err)
goto out;
@@ -2127,8 +2118,8 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
err:
if (err == -EDEADLK) {
- eb_release_vmas(eb, false);
- err = i915_gem_ww_ctx_backoff(&eb->ww);
+ eb_release_vmas(eb, false, true);
+ err = i915_gem_ww_ctx_backoff(eb->ww);
if (!err)
goto repeat_validate;
}
@@ -2160,7 +2151,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
return err;
}
-static int eb_relocate_parse(struct i915_execbuffer *eb)
+static int eb_relocate_parse(struct i915_execbuffer *eb, bool first)
{
int err;
struct i915_request *rq = NULL;
@@ -2197,14 +2188,16 @@ static int eb_relocate_parse(struct i915_execbuffer *eb)
/* only throttle once, even if we didn't need to throttle */
throttle = false;
- err = eb_validate_vmas(eb);
- if (err == -EAGAIN)
- goto slow;
- else if (err)
- goto err;
+ if (first) {
+ err = eb_validate_vmas(eb);
+ if (err == -EAGAIN)
+ goto slow;
+ else if (err)
+ goto err;
+ }
/* The objects are in their final locations, apply the relocations. */
- if (eb->args->flags & __EXEC_HAS_RELOC) {
+ if (eb->args->flags & __EXEC_HAS_RELOC && first) {
struct eb_vma *ev;
list_for_each_entry(ev, &eb->relocs, reloc_link) {
@@ -2219,13 +2212,13 @@ static int eb_relocate_parse(struct i915_execbuffer *eb)
goto slow;
}
- if (!err)
+ if (!err && first)
err = eb_parse(eb);
err:
if (err == -EDEADLK) {
- eb_release_vmas(eb, false);
- err = i915_gem_ww_ctx_backoff(&eb->ww);
+ eb_release_vmas(eb, false, true);
+ err = i915_gem_ww_ctx_backoff(eb->ww);
if (!err)
goto retry;
}
@@ -2406,7 +2399,7 @@ shadow_batch_pin(struct i915_execbuffer *eb,
if (IS_ERR(vma))
return vma;
- err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
+ err = i915_vma_pin_ww(vma, eb->ww, 0, 0, flags);
if (err)
return ERR_PTR(err);
@@ -2631,7 +2624,7 @@ static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i9
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (eb->batch_flags & I915_DISPATCH_SECURE)
- return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);
+ return i915_gem_object_ggtt_pin_ww(vma->obj, eb->ww, NULL, 0, 0, 0);
return NULL;
}
@@ -2677,7 +2670,7 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->batch_pool = pool;
}
- err = i915_gem_object_lock(pool->obj, &eb->ww);
+ err = i915_gem_object_lock(pool->obj, eb->ww);
if (err)
goto err;
@@ -2877,7 +2870,7 @@ static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throt
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = intel_context_pin_ww(ce, &eb->ww);
+ err = intel_context_pin_ww(ce, eb->ww);
if (err)
return ERR_PTR(err);
@@ -3429,7 +3422,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
unsigned int batch_number,
struct dma_fence *in_fence,
struct dma_fence *exec_fence,
- struct dma_fence **out_fence)
+ struct dma_fence **out_fence,
+ struct i915_gem_ww_ctx *ww)
{
struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
@@ -3450,7 +3444,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.exec = exec;
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
- eb.vma[0].vma = NULL;
+ if (first)
+ eb.vma[0].vma = NULL;
eb.reloc_pool = eb.batch_pool = NULL;
eb.reloc_context = NULL;
@@ -3462,6 +3457,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.batch_len = args->batch_len;
eb.trampoline = NULL;
eb.composite_fence = NULL;
+ eb.ww = ww;
eb.fences = NULL;
eb.num_fences = 0;
@@ -3480,9 +3476,14 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (err)
goto err_ext;
- err = eb_create(&eb);
- if (err)
- goto err_ext;
+ if (first) {
+ err = eb_create(&eb);
+ if (err)
+ goto err_ext;
+ } else {
+ eb.lut_size = -eb.buffer_count;
+ }
+
GEM_BUG_ON(!eb.lut_size);
@@ -3497,15 +3498,22 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_context;
- err = eb_lookup_vmas(&eb);
- if (err) {
- eb_release_vmas(&eb, true);
- goto err_engine;
+ if (first) {
+ err = eb_lookup_vmas(&eb);
+ if (err) {
+ eb_release_vmas(&eb, true, true);
+ goto err_engine;
+ }
+
+ } else {
+ eb.batch = &eb.vma[eb.batch_index];
}
- i915_gem_ww_ctx_init(&eb.ww, true);
- err = eb_relocate_parse(&eb);
+ if (first)
+ i915_gem_ww_ctx_init(eb.ww, true);
+
+ err = eb_relocate_parse(&eb, first);
if (err) {
/*
* If the user expects the execobject.offset and
@@ -3518,7 +3526,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- ww_acquire_done(&eb.ww.ctx);
+ if (first)
+ ww_acquire_done(&eb.ww->ctx);
batch = eb.batch->vma;
@@ -3621,11 +3630,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
i915_request_put(eb.request);
err_vma:
- eb_release_vmas(&eb, true);
+ eb_release_vmas(&eb, true, err || last);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
WARN_ON(err == -EDEADLK);
- i915_gem_ww_ctx_fini(&eb.ww);
+ if (err || last)
+ i915_gem_ww_ctx_fini(eb.ww);
if (eb.batch_pool)
intel_gt_buffer_pool_put(eb.batch_pool);
@@ -3687,6 +3697,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
const size_t count = args->buffer_count;
int err;
struct i915_gem_context *ctx;
+ struct i915_gem_ww_ctx ww;
struct intel_context *parent = NULL;
unsigned int num_batches = 1, i;
@@ -3810,7 +3821,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
0,
in_fence,
exec_fence,
- out_fences);
+ out_fences,
+ &ww);
for (i = 1; err == 0 && i < num_batches; i++)
err = i915_gem_do_execbuffer(dev, file, args, exec2_list,
@@ -3820,7 +3832,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
i,
NULL,
NULL,
- out_fences);
+ out_fences,
+ &ww);
if (parent)
mutex_unlock(&parent->parallel_submit);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 4df505e4c53a..e858018017d8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -32,11 +32,11 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
if (IS_ERR(vma))
return PTR_ERR(vma);
- err = i915_gem_object_lock(obj, &eb->ww);
+ err = i915_gem_object_lock(obj, eb->ww);
if (err)
return err;
- err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
+ err = i915_vma_pin_ww(vma, eb->ww, 0, 0, PIN_USER | PIN_HIGH);
if (err)
return err;
@@ -106,10 +106,12 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
static int igt_gpu_reloc(void *arg)
{
struct i915_execbuffer eb;
+ struct i915_gem_ww_ctx ww;
struct drm_i915_gem_object *scratch;
int err = 0;
u32 *map;
+ eb.ww = &ww;
eb.i915 = arg;
scratch = i915_gem_object_create_internal(eb.i915, 4096);
@@ -137,20 +139,20 @@ static int igt_gpu_reloc(void *arg)
eb.reloc_pool = NULL;
eb.reloc_context = NULL;
- i915_gem_ww_ctx_init(&eb.ww, false);
+ i915_gem_ww_ctx_init(eb.ww, false);
retry:
- err = intel_context_pin_ww(eb.context, &eb.ww);
+ err = intel_context_pin_ww(eb.context, eb.ww);
if (!err) {
err = __igt_gpu_reloc(&eb, scratch);
intel_context_unpin(eb.context);
}
if (err == -EDEADLK) {
- err = i915_gem_ww_ctx_backoff(&eb.ww);
+ err = i915_gem_ww_ctx_backoff(eb.ww);
if (!err)
goto retry;
}
- i915_gem_ww_ctx_fini(&eb.ww);
+ i915_gem_ww_ctx_fini(eb.ww);
if (eb.reloc_pool)
intel_gt_buffer_pool_put(eb.reloc_pool);
--
2.28.0