[PATCH i-g-t 29/52] WIP: tests/gem_exec_fence: rewrite to no-reloc
Zbigniew Kempczyński
zbigniew.kempczynski@intel.com
Thu Jul 1 12:37:09 UTC 2021
Convert the subtests from relocations to softpin: when a valid
allocator handle is available, take offsets from the IGT allocator
and pin the objects with EXEC_OBJECT_PINNED; otherwise fall back to
the relocation path. This lets the test run on platforms where the
kernel no longer accepts relocations. Forked children open their own
allocator handle, and the fixtures bracket the affected subtests with
intel_allocator_multiprocess_start()/stop().

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Signed-off-by: Andrzej Turko <andrzej.turko@linux.intel.com>
---
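Reviewer note (below the --- cut, so not applied by git-am): every
converted call site follows the same dual-path pattern. A minimal
sketch, assuming the allocator helpers from lib/intel_allocator.h
(get_reloc_ahnd(), get_offset(), put_offset(), put_ahnd()) and the
usual IGT ioctl wrappers; exec_one() is a made-up name used only for
illustration:

    static void exec_one(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
                         struct drm_i915_gem_exec_object2 *obj,
                         struct drm_i915_gem_relocation_entry *reloc)
    {
            /* returns 0 when the kernel still supports relocations */
            uint64_t ahnd = get_reloc_ahnd(fd, 0 /* ctx id */);
            uint64_t offset = get_offset(ahnd, obj->handle, 4096, 0);

            if (!ahnd) {
                    /* legacy path: the kernel patches the batch */
                    obj->relocs_ptr = to_user_pointer(reloc);
                    obj->relocation_count = 1;
            } else {
                    /* no-reloc path: pin the object where we allocated it */
                    obj->offset = offset;
                    obj->flags |= EXEC_OBJECT_PINNED;
            }

            gem_execbuf(fd, execbuf);

            put_offset(ahnd, obj->handle);  /* release the vma range */
            put_ahnd(ahnd);                 /* close the allocator handle */
    }

Allocator handles are per process, so children spawned with igt_fork()
open their own handle (see test_fence_await()), and the fixtures wrap
the affected subtests in intel_allocator_multiprocess_start()/stop().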
tests/i915/gem_exec_fence.c | 276 ++++++++++++++++++++++++++----------
1 file changed, 198 insertions(+), 78 deletions(-)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index c3a650d89..27bcaf028 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -57,7 +57,8 @@ struct sync_merge_data {
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
static void store(int fd, const struct intel_execution_engine2 *e,
- int fence, uint32_t target, unsigned offset_value)
+ int fence, uint32_t target, uint64_t target_offset,
+ unsigned offset_value, uint64_t ahnd)
{
const int SCRATCH = 0;
const int BATCH = 1;
@@ -65,7 +66,8 @@ static void store(int fd, const struct intel_execution_engine2 *e,
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_execbuffer2 execbuf;
- uint32_t batch[16];
+ uint32_t batch[16], delta;
+ uint64_t bb_offset;
int i;
memset(&execbuf, 0, sizeof(execbuf));
@@ -78,36 +80,50 @@ static void store(int fd, const struct intel_execution_engine2 *e,
memset(obj, 0, sizeof(obj));
obj[SCRATCH].handle = target;
-
obj[BATCH].handle = gem_create(fd, 4096);
- obj[BATCH].relocs_ptr = to_user_pointer(&reloc);
- obj[BATCH].relocation_count = 1;
- memset(&reloc, 0, sizeof(reloc));
+ bb_offset = get_offset(ahnd, obj[BATCH].handle, 4096, 0);
+ if (!ahnd) {
+ obj[BATCH].relocs_ptr = to_user_pointer(&reloc);
+ obj[BATCH].relocation_count = 1;
+ }
+ memset(&reloc, 0, sizeof(reloc));
i = 0;
- reloc.target_handle = obj[SCRATCH].handle;
- reloc.presumed_offset = -1;
- reloc.offset = sizeof(uint32_t) * (i + 1);
- reloc.delta = sizeof(uint32_t) * offset_value;
- reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- reloc.write_domain = I915_GEM_DOMAIN_INSTRUCTION;
+ delta = sizeof(uint32_t) * offset_value;
+ if (!ahnd) {
+ reloc.target_handle = obj[SCRATCH].handle;
+ reloc.presumed_offset = -1;
+ reloc.offset = sizeof(uint32_t) * (i + 1);
+ reloc.delta = delta;
+ reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ reloc.write_domain = I915_GEM_DOMAIN_INSTRUCTION;
+ } else {
+ obj[SCRATCH].offset = target_offset;
+ obj[SCRATCH].flags |= EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+ obj[BATCH].offset = bb_offset;
+ obj[BATCH].flags |= EXEC_OBJECT_PINNED;
+ }
batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
if (gen >= 8) {
- batch[++i] = reloc.delta;
- batch[++i] = 0;
+ batch[++i] = target_offset + delta;
+ batch[++i] = target_offset >> 32;
} else if (gen >= 4) {
batch[++i] = 0;
- batch[++i] = reloc.delta;
+ batch[++i] = delta;
reloc.offset += sizeof(uint32_t);
} else {
batch[i]--;
- batch[++i] = reloc.delta;
+ batch[++i] = delta;
}
batch[++i] = offset_value;
batch[++i] = MI_BATCH_BUFFER_END;
gem_write(fd, obj[BATCH].handle, 0, batch, sizeof(batch));
gem_execbuf(fd, &execbuf);
+ igt_debug("v: %u, handle: %u, batch: %llx, scratch: %llx\n",
+ offset_value, obj[BATCH].handle,
+ obj[BATCH].offset, obj[SCRATCH].offset);
gem_close(fd, obj[BATCH].handle);
+ put_offset(ahnd, obj[BATCH].handle);
}
static bool fence_busy(int fence)
@@ -128,6 +144,7 @@ static void test_fence_busy(int fd, const struct intel_execution_engine2 *e,
struct drm_i915_gem_execbuffer2 execbuf;
struct timespec tv;
uint32_t *batch;
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
int fence, i, timeout;
if ((flags & HANG) == 0)
@@ -142,10 +159,7 @@ static void test_fence_busy(int fd, const struct intel_execution_engine2 *e,
memset(&obj, 0, sizeof(obj));
obj.handle = gem_create(fd, 4096);
-
- obj.relocs_ptr = to_user_pointer(&reloc);
- obj.relocation_count = 1;
- memset(&reloc, 0, sizeof(reloc));
+ obj.offset = get_offset(ahnd, obj.handle, 4096, 0);
batch = gem_mmap__wc(fd, obj.handle, 0, 4096, PROT_WRITE);
gem_set_domain(fd, obj.handle,
@@ -155,26 +169,33 @@ static void test_fence_busy(int fd, const struct intel_execution_engine2 *e,
if ((flags & HANG) == 0)
batch[i++] = 0x5 << 23;
- reloc.target_handle = obj.handle; /* recurse */
- reloc.presumed_offset = 0;
- reloc.offset = (i + 1) * sizeof(uint32_t);
- reloc.delta = 0;
- reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
- reloc.write_domain = 0;
+ if (!ahnd) {
+ obj.relocs_ptr = to_user_pointer(&reloc);
+ obj.relocation_count = 1;
+ memset(&reloc, 0, sizeof(reloc));
+ reloc.target_handle = obj.handle; /* recurse */
+ reloc.presumed_offset = obj.offset;
+ reloc.offset = (i + 1) * sizeof(uint32_t);
+ reloc.delta = 0;
+ reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc.write_domain = 0;
+ } else {
+ obj.flags |= EXEC_OBJECT_PINNED;
+ }
batch[i] = MI_BATCH_BUFFER_START;
if (gen >= 8) {
batch[i] |= 1 << 8 | 1;
- batch[++i] = 0;
- batch[++i] = 0;
+ batch[++i] = obj.offset;
+ batch[++i] = obj.offset >> 32;
} else if (gen >= 6) {
batch[i] |= 1 << 8;
- batch[++i] = 0;
+ batch[++i] = obj.offset;
} else {
batch[i] |= 2 << 6;
- batch[++i] = 0;
+ batch[++i] = obj.offset;
if (gen < 4) {
- batch[i] |= 1;
+ batch[i]++;
reloc.delta = 1;
}
}
@@ -211,6 +232,8 @@ static void test_fence_busy(int fd, const struct intel_execution_engine2 *e,
close(fence);
gem_close(fd, obj.handle);
+ put_offset(ahnd, obj.handle);
+ put_ahnd(ahnd);
gem_quiescent_gpu(fd);
}
@@ -224,6 +247,7 @@ static void test_fence_busy_all(int fd, unsigned flags)
struct drm_i915_gem_execbuffer2 execbuf;
struct timespec tv;
uint32_t *batch;
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
int all, i, timeout;
gem_quiescent_gpu(fd);
@@ -234,10 +258,8 @@ static void test_fence_busy_all(int fd, unsigned flags)
memset(&obj, 0, sizeof(obj));
obj.handle = gem_create(fd, 4096);
-
- obj.relocs_ptr = to_user_pointer(&reloc);
- obj.relocation_count = 1;
- memset(&reloc, 0, sizeof(reloc));
+ obj.offset = get_offset(ahnd, obj.handle, 4096, 0);
+ igt_assert(obj.offset != -1);
batch = gem_mmap__wc(fd, obj.handle, 0, 4096, PROT_WRITE);
gem_set_domain(fd, obj.handle,
@@ -247,26 +269,33 @@ static void test_fence_busy_all(int fd, unsigned flags)
if ((flags & HANG) == 0)
batch[i++] = 0x5 << 23;
- reloc.target_handle = obj.handle; /* recurse */
- reloc.presumed_offset = 0;
- reloc.offset = (i + 1) * sizeof(uint32_t);
- reloc.delta = 0;
- reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
- reloc.write_domain = 0;
+ if (!ahnd) {
+ obj.relocs_ptr = to_user_pointer(&reloc);
+ obj.relocation_count = 1;
+ memset(&reloc, 0, sizeof(reloc));
+ reloc.target_handle = obj.handle; /* recurse */
+ reloc.presumed_offset = obj.offset;
+ reloc.offset = (i + 1) * sizeof(uint32_t);
+ reloc.delta = 0;
+ reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc.write_domain = 0;
+ } else {
+ obj.flags |= EXEC_OBJECT_PINNED;
+ }
batch[i] = MI_BATCH_BUFFER_START;
if (gen >= 8) {
batch[i] |= 1 << 8 | 1;
- batch[++i] = 0;
- batch[++i] = 0;
+ batch[++i] = obj.offset;
+ batch[++i] = obj.offset >> 32;
} else if (gen >= 6) {
batch[i] |= 1 << 8;
- batch[++i] = 0;
+ batch[++i] = obj.offset;
} else {
batch[i] |= 2 << 6;
- batch[++i] = 0;
+ batch[++i] = obj.offset;
if (gen < 4) {
- batch[i] |= 1;
+ batch[i]++;
reloc.delta = 1;
}
}
@@ -283,6 +312,7 @@ static void test_fence_busy_all(int fd, unsigned flags)
execbuf.flags = e->flags | I915_EXEC_FENCE_OUT;
execbuf.rsvd2 = -1;
gem_execbuf_wr(fd, &execbuf);
+
fence = execbuf.rsvd2 >> 32;
igt_assert(fence != -1);
@@ -325,6 +355,8 @@ static void test_fence_busy_all(int fd, unsigned flags)
close(all);
gem_close(fd, obj.handle);
+ put_offset(ahnd, obj.handle);
+ put_ahnd(ahnd);
gem_quiescent_gpu(fd);
}
@@ -344,15 +376,19 @@ static void test_fence_await(int fd, const struct intel_execution_engine2 *e,
uint32_t scratch = gem_create(fd, 4096);
igt_spin_t *spin;
uint32_t *out;
+ uint64_t scratch_offset, ahnd = get_reloc_ahnd(fd, 0);
int i;
+ scratch_offset = get_offset(ahnd, scratch, 4096, 0);
+
out = gem_mmap__wc(fd, scratch, 0, 4096, PROT_WRITE);
gem_set_domain(fd, scratch,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
spin = igt_spin_new(fd,
.engine = e->flags,
- .flags = IGT_SPIN_FENCE_OUT | spin_hang(flags));
+ .flags = IGT_SPIN_FENCE_OUT | spin_hang(flags),
+ .ahnd = ahnd);
igt_assert(spin->out_fence != -1);
i = 0;
@@ -361,10 +397,17 @@ static void test_fence_await(int fd, const struct intel_execution_engine2 *e,
continue;
if (flags & NONBLOCK) {
- store(fd, e2, spin->out_fence, scratch, i);
+ store(fd, e2, spin->out_fence, scratch, scratch_offset,
+ i, ahnd);
} else {
- igt_fork(child, 1)
- store(fd, e2, spin->out_fence, scratch, i);
+ igt_fork(child, 1) {
+ ahnd = get_reloc_ahnd(fd, 0);
+
+ store(fd, e2, spin->out_fence, scratch, scratch_offset,
+ i, ahnd);
+
+ put_ahnd(ahnd);
+ }
}
i++;
@@ -390,6 +433,9 @@ static void test_fence_await(int fd, const struct intel_execution_engine2 *e,
igt_spin_free(fd, spin);
gem_close(fd, scratch);
+ put_offset(ahnd, scratch);
+ put_ahnd(ahnd);
+
}
static uint32_t timeslicing_batches(int i915, uint32_t *offset)
@@ -606,13 +652,16 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
igt_spin_t *spin;
int fence;
int x = 0;
+ uint64_t ahnd = get_reloc_ahnd(i915, 0), bb_offset;
+ uint64_t scratch_offset = get_offset(ahnd, scratch, 4096, 0);
fence = igt_cork_plug(&cork, i915),
spin = igt_spin_new(i915,
.engine = e->flags,
.fence = fence,
.flags = (IGT_SPIN_FENCE_OUT |
- IGT_SPIN_FENCE_IN));
+ IGT_SPIN_FENCE_IN),
+ .ahnd = ahnd);
close(fence);
/* Queue all secondaries */
@@ -626,7 +675,7 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
{ .handle = scratch, },
{
.relocs_ptr = to_user_pointer(&reloc),
- .relocation_count = 1,
+ .relocation_count = !ahnd ? 1 : 0,
}
};
struct drm_i915_gem_execbuffer2 execbuf = {
@@ -643,11 +692,19 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
obj[1].handle = gem_create(i915, 4096);
+ if (ahnd) {
+ bb_offset = get_offset(ahnd, obj[1].handle, 4096, 0);
+ obj[1].offset = bb_offset;
+ obj[1].flags = EXEC_OBJECT_PINNED;
+ obj[0].offset = scratch_offset;
+ obj[0].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+ }
+
i = 0;
batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
if (gen >= 8) {
- batch[++i] = reloc.delta;
- batch[++i] = 0;
+ batch[++i] = scratch_offset + reloc.delta;
+ batch[++i] = scratch_offset >> 32;
} else if (gen >= 4) {
batch[++i] = 0;
batch[++i] = reloc.delta;
@@ -668,6 +725,7 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
}
igt_assert(gem_bo_busy(i915, spin->handle));
gem_close(i915, scratch);
+ put_offset(ahnd, scratch);
igt_require(x);
/*
@@ -694,17 +752,20 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
igt_assert_eq_u32(out[i], ~i);
gem_close(i915, handle[i]);
+ put_offset(ahnd, handle[i]);
}
munmap(out, 4096);
/* Master should still be spinning, but all output should be written */
igt_assert(gem_bo_busy(i915, spin->handle));
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
+ uint64_t ahnd = get_reloc_ahnd(i915, 0);
struct drm_i915_gem_relocation_entry reloc = {
.target_handle = gem_create(i915, 4096),
.write_domain = I915_GEM_DOMAIN_RENDER,
@@ -715,7 +776,7 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
{
.handle = gem_create(i915, 4096),
.relocs_ptr = to_user_pointer(&reloc),
- .relocation_count = 1,
+ .relocation_count = !ahnd ? 1 : 0,
}
};
struct drm_i915_gem_execbuffer2 execbuf = {
@@ -727,9 +788,19 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
uint32_t batch[16];
igt_spin_t *spin;
uint32_t result;
+ uint64_t bb_offset, target_offset;
int fence;
int i;
+ bb_offset = get_offset(ahnd, obj[1].handle, 4096, 0);
+ target_offset = get_offset(ahnd, obj[0].handle, 4096, 0);
+ if (ahnd) {
+ obj[1].offset = bb_offset;
+ obj[1].flags = EXEC_OBJECT_PINNED;
+ obj[0].offset = target_offset;
+ obj[0].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+ }
+
/*
* A variant of test_parallel() that runs a bonded pair on a single
* engine and ensures that the secondary batch cannot start before
@@ -741,14 +812,15 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
.engine = e->flags,
.fence = fence,
.flags = (IGT_SPIN_FENCE_OUT |
- IGT_SPIN_FENCE_IN));
+ IGT_SPIN_FENCE_IN),
+ .ahnd = ahnd);
close(fence);
i = 0;
batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
if (gen >= 8) {
- batch[++i] = reloc.delta;
- batch[++i] = 0;
+ batch[++i] = target_offset + reloc.delta;
+ batch[++i] = target_offset >> 32;
} else if (gen >= 4) {
batch[++i] = 0;
batch[++i] = reloc.delta;
@@ -759,6 +831,7 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
}
batch[++i] = 0xd0df0d;
batch[++i] = MI_BATCH_BUFFER_END;
+
gem_write(i915, obj[1].handle, 0, batch, sizeof(batch));
execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
@@ -769,6 +842,7 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
gem_execbuf(i915, &execbuf);
gem_context_destroy(i915, execbuf.rsvd1);
gem_close(i915, obj[1].handle);
+ put_offset(ahnd, obj[1].handle);
/*
* No secondary should be executed since master is stalled. If there
@@ -790,10 +864,12 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
gem_read(i915, obj[0].handle, 0, &result, sizeof(result));
igt_assert_eq_u32(result, 0xd0df0d);
gem_close(i915, obj[0].handle);
+ put_offset(ahnd, obj[0].handle);
/* Master should still be spinning, but all output should be written */
igt_assert(gem_bo_busy(i915, spin->handle));
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void test_submit_chain(int i915)
@@ -803,6 +879,7 @@ static void test_submit_chain(int i915)
IGT_LIST_HEAD(list);
IGT_CORK_FENCE(cork);
int fence;
+ uint64_t ahnd = get_reloc_ahnd(i915, 0);
/* Check that we can simultaneously launch spinners on each engine */
@@ -813,7 +890,8 @@ static void test_submit_chain(int i915)
.fence = fence,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT |
- IGT_SPIN_FENCE_SUBMIT));
+ IGT_SPIN_FENCE_SUBMIT),
+ .ahnd = ahnd);
fence = spin->out_fence;
igt_list_move(&spin->link, &list);
@@ -835,6 +913,7 @@ static void test_submit_chain(int i915)
igt_assert_eq(sync_fence_status(spin->out_fence), 1);
igt_spin_free(i915, spin);
}
+ put_ahnd(ahnd);
}
static uint32_t batch_create(int fd)
@@ -862,9 +941,10 @@ static void test_keep_in_fence(int fd, const struct intel_execution_engine2 *e)
unsigned long count, last;
struct itimerval itv;
igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
int fence;
- spin = igt_spin_new(fd, .engine = e->flags);
+ spin = igt_spin_new(fd, .engine = e->flags, .ahnd = ahnd);
gem_execbuf_wr(fd, &execbuf);
fence = upper_32_bits(execbuf.rsvd2);
@@ -913,6 +993,7 @@ static void test_keep_in_fence(int fd, const struct intel_execution_engine2 *e)
igt_spin_free(fd, spin);
gem_quiescent_gpu(fd);
+ put_ahnd(ahnd);
}
#define EXPIRED 0x10000
@@ -1135,7 +1216,8 @@ static void test_syncobj_unused_fence(int fd)
struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd, 0),
};
- igt_spin_t *spin = igt_spin_new(fd);
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
+ igt_spin_t *spin = igt_spin_new(fd, .ahnd = ahnd);
/* sanity check our syncobj_to_sync_file interface */
igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1161,6 +1243,7 @@ static void test_syncobj_unused_fence(int fd)
syncobj_destroy(fd, fence.handle);
igt_spin_free(fd, spin);
+ put_ahnd(ahnd);
}
static void test_syncobj_invalid_wait(int fd)
@@ -1227,10 +1310,11 @@ static void test_syncobj_signal(int fd)
struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd, 0),
};
- igt_spin_t *spin = igt_spin_new(fd);
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
+ igt_spin_t *spin;
/* Check that the syncobj is signaled only when our request/fence is */
-
+ spin = igt_spin_new(fd, .ahnd = ahnd);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
@@ -1256,6 +1340,7 @@ static void test_syncobj_signal(int fd)
gem_close(fd, obj.handle);
syncobj_destroy(fd, fence.handle);
+ put_ahnd(ahnd);
}
static void test_syncobj_wait(int fd)
@@ -1270,6 +1355,7 @@ static void test_syncobj_wait(int fd)
unsigned handle[I915_EXEC_RING_MASK + 1];
igt_spin_t *spin;
int n;
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
/* Check that we can use the syncobj to asynchronous wait prior to
* execution.
@@ -1277,7 +1363,7 @@ static void test_syncobj_wait(int fd)
gem_quiescent_gpu(fd);
- spin = igt_spin_new(fd);
+ spin = igt_spin_new(fd, .ahnd = ahnd);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1326,6 +1412,8 @@ static void test_syncobj_wait(int fd)
gem_sync(fd, handle[i]);
gem_close(fd, handle[i]);
}
+
+ put_ahnd(ahnd);
}
static void test_syncobj_export(int fd)
@@ -1337,7 +1425,10 @@ static void test_syncobj_export(int fd)
.handle = syncobj_create(fd, 0),
};
int export[2];
- igt_spin_t *spin = igt_spin_new(fd);
+ igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
+
+ spin = igt_spin_new(fd, .ahnd = ahnd);
/* Check that if we export the syncobj prior to use it picks up
* the later fence. This allows a syncobj to establish a channel
@@ -1385,6 +1476,8 @@ static void test_syncobj_export(int fd)
syncobj_destroy(fd, import);
close(export[n]);
}
+
+ put_ahnd(ahnd);
}
static void test_syncobj_repeat(int fd)
@@ -1395,7 +1488,10 @@ static void test_syncobj_repeat(int fd)
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_fence *fence;
int export;
- igt_spin_t *spin = igt_spin_new(fd);
+ igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
+
+ spin = igt_spin_new(fd, .ahnd = ahnd);
/* Check that we can wait on the same fence multiple times */
fence = calloc(nfences, sizeof(*fence));
@@ -1443,6 +1539,8 @@ static void test_syncobj_repeat(int fd)
syncobj_destroy(fd, fence[i].handle);
}
free(fence);
+
+ put_ahnd(ahnd);
}
static void test_syncobj_import(int fd)
@@ -1450,10 +1548,13 @@ static void test_syncobj_import(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- igt_spin_t *spin = igt_spin_new(fd);
+ igt_spin_t *spin;
uint32_t sync = syncobj_create(fd, 0);
+ uint64_t ahnd = get_reloc_ahnd(fd, 0);
int fence;
+ spin = igt_spin_new(fd, .ahnd = ahnd);
+
/* Check that we can create a syncobj from an explicit fence (which
* uses sync_file) and that it acts just like a regular fence.
*/
@@ -1486,6 +1587,7 @@ static void test_syncobj_import(int fd)
gem_close(fd, obj.handle);
syncobj_destroy(fd, sync);
+ put_ahnd(ahnd);
}
static void test_syncobj_channel(int fd)
@@ -1777,8 +1879,10 @@ static void test_syncobj_timeline_unused_fence(int fd)
struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd, 0),
};
- igt_spin_t *spin = igt_spin_new(fd);
- uint64_t value = 1;
+ igt_spin_t *spin;
+ uint64_t value = 1, ahnd = get_reloc_ahnd(fd, 0);
+
+ spin = igt_spin_new(fd, .ahnd = ahnd);
/* sanity check our syncobj_to_sync_file interface */
igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1810,6 +1914,7 @@ static void test_syncobj_timeline_unused_fence(int fd)
syncobj_destroy(fd, fence.handle);
igt_spin_free(fd, spin);
+ put_ahnd(ahnd);
}
static const char *test_syncobj_timeline_invalid_wait_desc =
@@ -1918,7 +2023,7 @@ static void test_syncobj_timeline_signal(int fd)
struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd, 0),
};
- uint64_t value = 42, query_value;
+ uint64_t value = 42, query_value, ahnd = get_reloc_ahnd(fd, 0);
igt_spin_t *spin;
/* Check that the syncobj is signaled only when our request/fence is */
@@ -1943,7 +2048,7 @@ static void test_syncobj_timeline_signal(int fd)
fence.flags = I915_EXEC_FENCE_SIGNAL;
/* Check syncobj after waiting on the buffer handle. */
- spin = igt_spin_new(fd);
+ spin = igt_spin_new(fd, .ahnd = ahnd);
gem_execbuf(fd, &execbuf);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -1962,7 +2067,7 @@ static void test_syncobj_timeline_signal(int fd)
syncobj_timeline_query(fd, &fence.handle, &query_value, 1);
igt_assert_eq(query_value, value);
- spin = igt_spin_new(fd);
+ spin = igt_spin_new(fd, .ahnd = ahnd);
/*
* Wait on the syncobj and verify the state of the buffer
@@ -1993,6 +2098,7 @@ static void test_syncobj_timeline_signal(int fd)
gem_close(fd, obj.handle);
syncobj_destroy(fd, fence.handle);
+ put_ahnd(ahnd);
}
static const char *test_syncobj_timeline_wait_desc =
@@ -2015,7 +2121,7 @@ static void test_syncobj_timeline_wait(int fd)
};
unsigned handle[I915_EXEC_RING_MASK + 1];
const struct intel_execution_engine2 *e;
- uint64_t value = 1;
+ uint64_t value = 1, ahnd = get_reloc_ahnd(fd, 0);
igt_spin_t *spin;
int n;
@@ -2025,7 +2131,7 @@ static void test_syncobj_timeline_wait(int fd)
gem_quiescent_gpu(fd);
- spin = igt_spin_new(fd, .engine = ALL_ENGINES);
+ spin = igt_spin_new(fd, .engine = ALL_ENGINES, .ahnd = ahnd);
memset(&timeline_fences, 0, sizeof(timeline_fences));
timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
@@ -2073,6 +2179,7 @@ static void test_syncobj_timeline_wait(int fd)
gem_sync(fd, handle[i]);
gem_close(fd, handle[i]);
}
+ put_ahnd(ahnd);
}
static const char *test_syncobj_timeline_export_desc =
@@ -2089,9 +2196,11 @@ static void test_syncobj_timeline_export(int fd)
struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd, 0),
};
- uint64_t value = 1;
+ uint64_t value = 1, ahnd = get_reloc_ahnd(fd, 0);
int export[2];
- igt_spin_t *spin = igt_spin_new(fd);
+ igt_spin_t *spin;
+
+ spin = igt_spin_new(fd, .ahnd = ahnd);
/* Check that if we export the syncobj prior to use it picks up
* the later fence. This allows a syncobj to establish a channel
@@ -2145,6 +2254,7 @@ static void test_syncobj_timeline_export(int fd)
syncobj_destroy(fd, import);
close(export[n]);
}
+ put_ahnd(ahnd);
}
static const char *test_syncobj_timeline_repeat_desc =
@@ -2161,9 +2271,11 @@ static void test_syncobj_timeline_repeat(int fd)
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
struct drm_i915_gem_exec_fence *fence;
- uint64_t *values;
+ uint64_t *values, ahnd = get_reloc_ahnd(fd, 0);
int export;
- igt_spin_t *spin = igt_spin_new(fd);
+ igt_spin_t *spin;
+
+ spin = igt_spin_new(fd, .ahnd = ahnd);
/* Check that we can wait on the same fence multiple times */
fence = calloc(nfences, sizeof(*fence));
@@ -2234,6 +2346,7 @@ static void test_syncobj_timeline_repeat(int fd)
}
free(fence);
free(values);
+ put_ahnd(ahnd);
}
static const char *test_syncobj_timeline_multiple_ext_nodes_desc =
@@ -2966,6 +3079,7 @@ igt_main
igt_subtest_group {
igt_fixture {
igt_fork_hang_detector(i915);
+ intel_allocator_multiprocess_start();
}
igt_subtest_with_dynamic("basic-busy") {
@@ -3058,6 +3172,7 @@ igt_main
}
igt_fixture {
+ intel_allocator_multiprocess_stop();
igt_stop_hang_detector();
}
}
@@ -3067,6 +3182,7 @@ igt_main
igt_fixture {
hang = igt_allow_hang(i915, 0, 0);
+ intel_allocator_multiprocess_start();
}
igt_subtest_with_dynamic("busy-hang") {
@@ -3094,6 +3210,7 @@ igt_main
}
}
igt_fixture {
+ intel_allocator_multiprocess_stop();
igt_disallow_hang(i915, hang);
}
}
@@ -3117,11 +3234,13 @@ igt_main
test_long_history(i915, ring_size, EXPIRED);
}
+
igt_subtest_group { /* syncobj */
igt_fixture {
igt_require(exec_has_fence_array(i915));
igt_assert(has_syncobj(i915));
igt_fork_hang_detector(i915);
+ intel_allocator_multiprocess_start();
}
igt_subtest("invalid-fence-array")
@@ -3155,6 +3274,7 @@ igt_main
test_syncobj_channel(i915);
igt_fixture {
+ intel_allocator_multiprocess_stop();
igt_stop_hang_detector();
}
}
--
2.26.0