[Intel-gfx] [PATCH v2] drm/i915/selftests: Trim blitter block size
Chris Wilson <chris@chris-wilson.co.uk>
Thu Feb 6 10:38:04 UTC 2020
Reduce the amount of work we do to verify client blt correctness, as
currently our 0.5s subtests take about 15s on slower devices!
v2: Grow the maximum block size until we run out of time
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
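As a quick reference for review, the sizing strategy the patch converges on
can be summarised by the sketch below (illustrative only, condensed from the
hunks that follow; ce, prng and the object/blit/verify steps are as in the
selftest):

u64 total = PAGE_SIZE;
IGT_TIMEOUT(end);

do {
	/* Cap the CPU-verified backing store to keep runtime bounded. */
	const u32 max_phys_size = SZ_64K;
	u32 sz, phys_sz;

	total = min(total, ce->vm->total);	/* never exceed the address space */
	sz = i915_prandom_u32_max_state(total, prng);
	phys_sz = sz % max_phys_size;

	sz = round_up(sz, PAGE_SIZE);
	phys_sz = round_up(phys_sz, PAGE_SIZE);

	/* ... create the object, blit, verify ... */

	total <<= 1;	/* double the block size each pass */
} while (!time_after(jiffies, end));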
.../i915/gem/selftests/i915_gem_object_blt.c | 54 +++++++++++--------
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..d7bc5b1b6989 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total;
int err;
ctx = thread->ctx;
@@ -225,10 +226,11 @@ static int igt_fill_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
@@ -238,11 +240,9 @@ static int igt_fill_blt_thread(void *arg)
* If we have a tiny shared address space, like for the GGTT
* then we can't be too greedy.
*/
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, ce->vm->total);
+ sz = i915_prandom_u32_max_state(total, prng);
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -276,13 +276,16 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -293,6 +296,8 @@ static int igt_fill_blt_thread(void *arg)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
@@ -319,6 +324,7 @@ static int igt_copy_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total;
int err;
ctx = thread->ctx;
@@ -334,20 +340,19 @@ static int igt_copy_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, ce->vm->total);
+ sz = i915_prandom_u32_max_state(total, prng);
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -397,13 +402,16 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -416,6 +424,8 @@ static int igt_copy_blt_thread(void *arg)
i915_gem_object_put(src);
i915_gem_object_put(dst);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
--
2.25.0
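For readers skimming the hunks above, the revised readback path in both
threads boils down to the following (illustrative only, condensed from the
diff; obj stands in for dst in the copy test, and vaddr, val,
huge_gem_object_phys_size() and the err_unpin label are as in the selftest):

/* Serialise against the blitter before reading back with the CPU. */
err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
	goto err_unpin;

/*
 * Sample every 17th dword rather than every one, flushing stale
 * cachelines first if the object is not coherent for CPU reads.
 */
for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
		clflush(&vaddr[i]);
		mb();
	}

	if (vaddr[i] != val) {
		pr_err("vaddr[%u]=%x, expected=%x\n", i, vaddr[i], val);
		err = -EINVAL;	/* bail out via the selftest's error path */
		goto err_unpin;
	}
}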