[PATCH i-g-t 25/28] NORELOC - gem_exec_capture - capture() & userptr()
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Fri Jun 11 11:51:00 UTC 2021
From: Andrzej Turko <andrzej.turko at intel.com>

When relocations are not available, objects must be assigned offsets
before the execbuf is submitted. Use the intel_allocator to obtain
offsets for all objects in the capture() and userptr() subtests, pin
them with EXEC_OBJECT_PINNED, and keep the relocation path as a
fallback for platforms without full ppGTT (gem_uses_ppgtt()).
Offsets are converted to canonical form before being compared against
the addresses reported in the error state.

[NOTE(review): patch body was empty; description reconstructed from the
diff. A Signed-off-by line is still missing and should be added by the
author before merging.]
---
tests/i915/gem_exec_capture.c | 96 ++++++++++++++++++++++++-----------
1 file changed, 67 insertions(+), 29 deletions(-)
diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
index a6b3d987f..0f76816ac 100644
--- a/tests/i915/gem_exec_capture.c
+++ b/tests/i915/gem_exec_capture.c
@@ -33,6 +33,9 @@
IGT_TEST_DESCRIPTION("Check that we capture the user specified objects on a hang");
+/* Without alignment detection we assume the worst-case scenario */
+#define ALIGNMENT (1 << 22)
+
static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
{
char *error, *str;
@@ -53,7 +56,7 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
addr = hi;
addr <<= 32;
addr |= lo;
- igt_assert_eq_u64(addr, obj->offset);
+ igt_assert_eq_u64(CANONICAL(addr), obj->offset);
found = true;
}
@@ -61,7 +64,8 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
igt_assert(found);
}
-static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
+static void __capture1(int fd, int dir, unsigned ring, uint64_t ahnd,
+ uint32_t target, uint64_t target_size)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[4];
@@ -73,31 +77,50 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
struct drm_i915_gem_execbuffer2 execbuf;
uint32_t *batch, *seqno;
int i;
+ bool do_relocs = !gem_uses_ppgtt(fd);
memset(obj, 0, sizeof(obj));
obj[SCRATCH].handle = gem_create(fd, 4096);
+ obj[SCRATCH].offset = intel_allocator_alloc(ahnd, obj[SCRATCH].handle,
+ 4096, ALIGNMENT);
obj[CAPTURE].handle = target;
+ obj[CAPTURE].offset = intel_allocator_alloc(ahnd, target, target_size,
+ ALIGNMENT);
obj[CAPTURE].flags = EXEC_OBJECT_CAPTURE;
obj[NOCAPTURE].handle = gem_create(fd, 4096);
-
+ obj[NOCAPTURE].offset = intel_allocator_alloc(ahnd, obj[NOCAPTURE].handle,
+ 4096, ALIGNMENT);
obj[BATCH].handle = gem_create(fd, 4096);
- obj[BATCH].relocs_ptr = (uintptr_t)reloc;
- obj[BATCH].relocation_count = ARRAY_SIZE(reloc);
-
- memset(reloc, 0, sizeof(reloc));
- reloc[0].target_handle = obj[BATCH].handle; /* recurse */
- reloc[0].presumed_offset = 0;
- reloc[0].offset = 5*sizeof(uint32_t);
- reloc[0].delta = 0;
- reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
- reloc[0].write_domain = 0;
+ obj[BATCH].offset = intel_allocator_alloc(ahnd, obj[BATCH].handle,
+ 4096, ALIGNMENT);
+ for (i = 0; i < 4; i++) {
+ obj[i].flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ obj[i].offset = CANONICAL(obj[i].offset);
+ }
- reloc[1].target_handle = obj[SCRATCH].handle; /* breadcrumb */
- reloc[1].presumed_offset = 0;
- reloc[1].offset = sizeof(uint32_t);
- reloc[1].delta = 0;
- reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
- reloc[1].write_domain = I915_GEM_DOMAIN_RENDER;
+ if (do_relocs) {
+ obj[BATCH].relocs_ptr = (uintptr_t)reloc;
+ obj[BATCH].relocation_count = ARRAY_SIZE(reloc);
+
+ memset(reloc, 0, sizeof(reloc));
+ reloc[0].target_handle = obj[BATCH].handle; /* recurse */
+ reloc[0].presumed_offset = obj[BATCH].offset;
+ reloc[0].offset = 5*sizeof(uint32_t);
+ reloc[0].delta = 0;
+ reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].write_domain = 0;
+
+ reloc[1].target_handle = obj[SCRATCH].handle; /* breadcrumb */
+ reloc[1].presumed_offset = obj[SCRATCH].offset;
+ reloc[1].offset = sizeof(uint32_t);
+ reloc[1].delta = 0;
+ reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc[1].write_domain = I915_GEM_DOMAIN_RENDER;
+ } else {
+ obj[SCRATCH].flags |= EXEC_OBJECT_WRITE;
+ for (i = 0; i < 4; i++)
+ obj[i].flags |= EXEC_OBJECT_PINNED;
+ }
seqno = gem_mmap__wc(fd, obj[SCRATCH].handle, 0, 4096, PROT_READ);
gem_set_domain(fd, obj[SCRATCH].handle,
@@ -110,15 +133,15 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
i = 0;
batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
if (gen >= 8) {
- batch[++i] = 0;
- batch[++i] = 0;
+ batch[++i] = obj[SCRATCH].offset;
+ batch[++i] = obj[SCRATCH].offset >> 32;
} else if (gen >= 4) {
batch[++i] = 0;
- batch[++i] = 0;
+ batch[++i] = obj[SCRATCH].offset;
reloc[1].offset += sizeof(uint32_t);
} else {
batch[i]--;
- batch[++i] = 0;
+ batch[++i] = obj[SCRATCH].offset;
}
batch[++i] = 0xc0ffee;
if (gen < 4)
@@ -127,14 +150,14 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
batch[++i] = MI_BATCH_BUFFER_START; /* not crashed? try again! */
if (gen >= 8) {
batch[i] |= 1 << 8 | 1;
- batch[++i] = 0;
- batch[++i] = 0;
+ batch[++i] = obj[BATCH].offset;
+ batch[++i] = obj[BATCH].offset >> 32;
} else if (gen >= 6) {
batch[i] |= 1 << 8;
- batch[++i] = 0;
+ batch[++i] = obj[BATCH].offset;
} else {
batch[i] |= 2 << 6;
- batch[++i] = 0;
+ batch[++i] = obj[BATCH].offset;
if (gen < 4) {
batch[i] |= 1;
reloc[0].delta = 1;
@@ -164,17 +187,27 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
gem_sync(fd, obj[BATCH].handle);
gem_close(fd, obj[BATCH].handle);
+ intel_allocator_free(ahnd, obj[BATCH].handle);
gem_close(fd, obj[NOCAPTURE].handle);
+ intel_allocator_free(ahnd, obj[NOCAPTURE].handle);
gem_close(fd, obj[SCRATCH].handle);
+ intel_allocator_free(ahnd, obj[SCRATCH].handle);
}
static void capture(int fd, int dir, unsigned ring)
{
uint32_t handle;
+ uint64_t ahnd;
+ ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
handle = gem_create(fd, 4096);
- __capture1(fd, dir, ring, handle);
+ intel_allocator_alloc(ahnd, handle, 4096, ALIGNMENT);
+
+ __capture1(fd, dir, ring, ahnd, handle, 4096);
+
gem_close(fd, handle);
+ intel_allocator_free(ahnd, handle);
+ intel_allocator_close(ahnd);
}
static int cmp(const void *A, const void *B)
@@ -568,14 +601,19 @@ static void prioinv(int fd, int dir, unsigned ring, const char *name)
static void userptr(int fd, int dir)
{
uint32_t handle;
+ uint64_t ahnd;
void *ptr;
+ ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
+ intel_allocator_alloc(ahnd, handle, 4096, ALIGNMENT);
- __capture1(fd, dir, 0, handle);
+ __capture1(fd, dir, 0, ahnd, handle, 4096);
gem_close(fd, handle);
+ intel_allocator_free(ahnd, handle);
+ intel_allocator_close(ahnd);
free(ptr);
}
--
2.26.0
More information about the Intel-gfx-trybot
mailing list