[Intel-gfx] [PATCH i-g-t] Add tests/gem_exec_parse_blt
Mika Kuoppala
mika.kuoppala@linux.intel.com
Thu Nov 14 17:18:06 UTC 2019
From: Mika Kuoppala <mika.kuoppala@intel.com>
For testing the blitter engine command parser on gen9.
v2: bad jump offset
v3: rebase
v4: improve bb start and subcase it
v5: fix presumed offsets (Jon)
v6: name, remove spurious gem_syncs (Chris)
v7: jump further backwards (Daniel), ctx_param_engines (Chris)
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
tests/Makefile.sources | 3 +
tests/i915/gem_exec_parse_blt.c | 1094 +++++++++++++++++++++++++
tests/intel-ci/fast-feedback.testlist | 1 +
tests/meson.build | 1 +
4 files changed, 1099 insertions(+)
create mode 100644 tests/i915/gem_exec_parse_blt.c
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 6b1d4cb2..33a4559d 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -245,6 +245,9 @@ gem_exec_params_SOURCES = i915/gem_exec_params.c
TESTS_progs += gem_exec_parse
gem_exec_parse_SOURCES = i915/gem_exec_parse.c
+TESTS_progs += gem_exec_parse_blt
+gem_exec_parse_blt_SOURCES = i915/gem_exec_parse_blt.c
+
TESTS_progs += gem_exec_reloc
gem_exec_reloc_SOURCES = i915/gem_exec_reloc.c
diff --git a/tests/i915/gem_exec_parse_blt.c b/tests/i915/gem_exec_parse_blt.c
new file mode 100644
index 00000000..b8b2bb50
--- /dev/null
+++ b/tests/i915/gem_exec_parse_blt.c
@@ -0,0 +1,1094 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "igt.h"
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include <drm.h>
+
+/* To help craft commands known to be invalid across all engines */
+#define INSTR_CLIENT_SHIFT 29
+#define INSTR_INVALID_CLIENT 0x7
+
+#define MI_LOAD_REGISTER_REG (0x2a << 23)
+#define MI_STORE_REGISTER_MEM (0x24 << 23)
+#define MI_ARB_ON_OFF (0x8 << 23)
+#define MI_USER_INTERRUPT (0x02 << 23)
+#define MI_FLUSH_DW (0x26 << 23)
+#define MI_ARB_CHECK (0x05 << 23)
+#define MI_REPORT_HEAD (0x07 << 23)
+#define MI_SUSPEND_FLUSH (0x0b << 23)
+#define MI_LOAD_SCAN_LINES_EXCL (0x13 << 23)
+#define MI_UPDATE_GTT (0x23 << 23)
+
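+/* Blitter (BCS) registers poked by these tests: BCS_SWCTRL plus the
+ * 64-bit general purpose registers GPR0-15 (8 byte stride, with _UDW
+ * naming the upper dword).
+ */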
+#define BCS_SWCTRL 0x22200
+#define BCS_GPR_BASE 0x22600
+#define BCS_GPR(n) (0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) (0x22600 + (n) * 8 + 4)
+
+#define HANDLE_SIZE 4096
+
+static int parser_version;
+
+static int command_parser_version(int fd)
+{
+ int version = -1;
+ drm_i915_getparam_t gp;
+
+ gp.param = I915_PARAM_CMD_PARSER_VERSION;
+ gp.value = &version;
+
+ if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
+ return version;
+
+ return -1;
+}
+
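+/* Write @cmds into @cmd_bo, add one relocation so that the dword at
+ * @target_offset in the batch points at @target_bo + @target_delta, and
+ * submit on @engine. Returns the execbuf result, 0 on success.
+ */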
+static int __exec_batch_patched(int fd, int engine,
+ uint32_t cmd_bo, const uint32_t *cmds, int size,
+ uint32_t target_bo, uint64_t target_offset, uint64_t target_delta)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[2];
+ struct drm_i915_gem_relocation_entry reloc[1];
+
+ gem_write(fd, cmd_bo, 0, cmds, size);
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = target_bo;
+ obj[1].handle = cmd_bo;
+
+ memset(reloc, 0, sizeof(reloc));
+ reloc[0].offset = target_offset;
+ reloc[0].target_handle = target_bo;
+ reloc[0].delta = target_delta;
+ reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].write_domain = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].presumed_offset = -1;
+
+ obj[1].relocs_ptr = to_user_pointer(reloc);
+ obj[1].relocation_count = 1;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 2;
+ execbuf.batch_len = size;
+ execbuf.flags = engine;
+
+ return __gem_execbuf(fd, &execbuf);
+}
+
+static void exec_batch_patched(int fd, int engine,
+ uint32_t cmd_bo, const uint32_t *cmds,
+ int size, int patch_offset,
+ long int expected_value)
+{
+ const uint32_t target_bo = gem_create(fd, HANDLE_SIZE);
+ uint64_t actual_value = 0;
+ long int ret;
+
+ ret = __exec_batch_patched(fd, engine, cmd_bo, cmds, size, target_bo, patch_offset, 0);
+ if (ret) {
+ igt_assert_lt(ret, 0);
+ gem_close(fd, target_bo);
+ igt_assert_eq(ret, expected_value);
+ return;
+ }
+
+ gem_read(fd, target_bo, 0, &actual_value, sizeof(actual_value));
+
+ gem_close(fd, target_bo);
+
+ igt_assert_eq(actual_value, expected_value);
+}
+
+static int __exec_batch(int fd, int engine, uint32_t cmd_bo,
+ const uint32_t *cmds, int size)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[1];
+ int ret;
+
+ gem_write(fd, cmd_bo, 0, cmds, size);
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = cmd_bo;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 1;
+ execbuf.batch_len = size;
+ execbuf.flags = engine;
+
+ ret = __gem_execbuf(fd, &execbuf);
+
+ return ret;
+}
+
+#if 0
+static void print_batch(const uint32_t *cmds, const uint32_t sz)
+{
+ const int commands = sz / 4;
+ int i;
+
+ igt_info("Batch size %d\n", sz);
+ for (i = 0; i < commands; i++)
+ igt_info("0x%08x: 0x%08x\n", i, cmds[i]);
+}
+#else
+#define print_batch(cmds, size)
+#endif
+
+#define exec_batch(fd, engine, bo, cmds, sz, expected) \
+ do { \
+ print_batch(cmds, sz); \
+ igt_assert_eq(__exec_batch(fd, engine, bo, cmds, sz), expected); \
+ } while (0)
+
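+/* Place @cmds so that they straddle the boundary between the two pages of a
+ * larger batch object and execute from just before the boundary, to check
+ * that the parser handles commands crossing a page.
+ */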
+static void exec_split_batch(int fd, int engine, const uint32_t *cmds,
+ int size, int expected_ret)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[1];
+ uint32_t cmd_bo;
+ const uint32_t noop[1024] = { 0 };
+ const int alloc_size = 4096 * 2;
+ const int actual_start_offset = 4096 - sizeof(uint32_t);
+
+ /* Allocate and fill a 2-page batch with noops */
+ cmd_bo = gem_create(fd, alloc_size);
+ gem_write(fd, cmd_bo, 0, noop, sizeof(noop));
+ gem_write(fd, cmd_bo, 4096, noop, sizeof(noop));
+
+ /* Write the provided commands such that the first dword
+ * of the command buffer is the last dword of the first
+ * page (i.e. the command is split across the two pages).
+ */
+ gem_write(fd, cmd_bo, actual_start_offset, cmds, size);
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = cmd_bo;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 1;
+ /* NB: We want batch_start_offset and batch_len to point to the block
+ * of the actual commands (i.e. at the last dword of the first page),
+ * but have to adjust both the start offset and length to meet the
+ * kernel driver's requirements on the alignment of those fields.
+ */
+ execbuf.batch_start_offset = actual_start_offset & ~0x7;
+ execbuf.batch_len =
+ ALIGN(size + actual_start_offset - execbuf.batch_start_offset,
+ 0x8);
+ execbuf.flags = engine;
+
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), expected_ret);
+
+ gem_close(fd, cmd_bo);
+}
+
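+/* Run @cmd_bo as a second-level batch, chained to via MI_BATCH_BUFFER_START
+ * from a small first-level batch. The second-level batch patches @target_bo
+ * at @patch_offset; depending on @expected_return the write is expected to
+ * either land or be blocked by the parser.
+ */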
+static void exec_batch_chained(int fd, int engine,
+ uint32_t cmd_bo, const uint32_t *cmds,
+ int size, int patch_offset,
+ uint64_t expected_value,
+ int expected_return)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[3];
+ struct drm_i915_gem_relocation_entry reloc[1];
+ struct drm_i915_gem_relocation_entry first_level_reloc;
+
+ const uint32_t target_bo = gem_create(fd, 4096);
+ const uint32_t first_level_bo = gem_create(fd, 4096);
+ uint64_t actual_value = 0;
+ int ret;
+
+ const uint32_t first_level_cmds[] = {
+ MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | 1,
+ 0,
+ 0,
+ MI_BATCH_BUFFER_END,
+ };
+
+ gem_write(fd, first_level_bo, 0,
+ first_level_cmds, sizeof(first_level_cmds));
+ gem_write(fd, cmd_bo, 0, cmds, size);
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = target_bo;
+ obj[1].handle = cmd_bo;
+ obj[2].handle = first_level_bo;
+
+ memset(reloc, 0, sizeof(reloc));
+ reloc[0].offset = patch_offset;
+ reloc[0].delta = 0;
+ reloc[0].target_handle = target_bo;
+ reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].write_domain = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].presumed_offset = -1;
+
+ obj[1].relocation_count = 1;
+ obj[1].relocs_ptr = to_user_pointer(reloc);
+
+ memset(&first_level_reloc, 0, sizeof(first_level_reloc));
+ first_level_reloc.offset = 4;
+ first_level_reloc.delta = 0;
+ first_level_reloc.target_handle = cmd_bo;
+ first_level_reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
+ first_level_reloc.write_domain = 0;
+ obj[2].relocation_count = 1;
+ obj[2].relocs_ptr = to_user_pointer(&first_level_reloc);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 3;
+ execbuf.batch_len = sizeof(first_level_cmds);
+ execbuf.flags = engine;
+
+ ret = __gem_execbuf(fd, &execbuf);
+ if (expected_return && ret == expected_return)
+ goto out;
+
+ gem_read(fd, target_bo, 0, &actual_value, sizeof(actual_value));
+
+out:
+ if (!expected_return)
+ igt_assert_eq(expected_value, actual_value);
+ else
+ igt_assert_neq(expected_value, actual_value);
+
+ gem_close(fd, first_level_bo);
+ gem_close(fd, target_bo);
+}
+
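+/* Secure batch support is expected to be unavailable here:
+ * I915_PARAM_HAS_SECURE_BATCHES must report 0.
+ */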
+static void test_secure_batches(const int fd)
+{
+ int v = -1;
+ drm_i915_getparam_t gp;
+
+ gp.param = I915_PARAM_HAS_SECURE_BATCHES;
+ gp.value = &v;
+
+ igt_assert_eq(drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp), 0);
+ igt_assert_eq(v, 0);
+}
+
+struct cmd {
+ uint32_t cmd;
+ int len;
+ const char *name;
+};
+
+#define CMD(C, L) { .cmd = (C), .len = (L), .name = #C }
+#define CMD_N(C) { .cmd = (C), .len = 1, .name = #C }
+
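+/* Commands the blitter parser is expected to accept as-is. */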
+static const struct cmd allowed_cmds[] = {
+ CMD_N(MI_NOOP),
+ CMD_N(MI_USER_INTERRUPT),
+ CMD_N(MI_WAIT_FOR_EVENT),
+ CMD(MI_FLUSH_DW, 5),
+ CMD_N(MI_ARB_CHECK),
+ CMD_N(MI_REPORT_HEAD),
+ CMD_N(MI_FLUSH),
+ CMD_N(MI_ARB_ON_OFF),
+ CMD_N(MI_SUSPEND_FLUSH),
+ CMD(MI_LOAD_SCAN_LINES_INCL, 2),
+ CMD(MI_LOAD_SCAN_LINES_EXCL, 2),
+};
+
+static uint32_t *inject_cmd(uint32_t *batch, const uint32_t cmd, int len)
+{
+ int i = 0;
+
+ batch[i++] = cmd;
+
+ while (--len)
+ batch[i++] = 0;
+
+ return &batch[i];
+}
+
+static unsigned long batch_num_cmds(const uint32_t * const batch_start,
+ const uint32_t * const batch_end)
+{
+ igt_assert_lte((unsigned long)batch_start, (unsigned long)batch_end);
+
+ return batch_end - batch_start;
+}
+
+static unsigned long batch_bytes(const uint32_t * const batch_start,
+ const uint32_t * const batch_end)
+{
+ const unsigned long bytes = batch_num_cmds(batch_start, batch_end) * 4;
+
+ igt_assert(!(bytes & 0x7));
+
+ return bytes;
+}
+
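+/* Build a batch from allowed_cmds[], all at once or one command per batch,
+ * padded with a NOOP where needed to keep the length qword aligned, and
+ * expect the parser to accept it.
+ */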
+static void test_allowed_all(const int fd, const uint32_t handle)
+{
+ uint32_t batch[4096];
+ uint32_t *b = &batch[0];
+
+ for (int i = 0; i < ARRAY_SIZE(allowed_cmds); i++)
+ b = inject_cmd(b, allowed_cmds[i].cmd,
+ allowed_cmds[i].len);
+
+ if (!(batch_num_cmds(batch, b) % 2))
+ b = inject_cmd(b, MI_NOOP, 1);
+
+ b = inject_cmd(b, MI_BATCH_BUFFER_END, 1);
+
+ exec_batch(fd, I915_EXEC_BLT, handle, batch, batch_bytes(batch, b), 0);
+}
+
+static void test_allowed_single(const int fd, const uint32_t handle)
+{
+ uint32_t batch[4096];
+ int ret;
+
+ for (int i = 0; i < ARRAY_SIZE(allowed_cmds); i++) {
+ uint32_t *b = &batch[0];
+
+ b = inject_cmd(b, allowed_cmds[i].cmd,
+ allowed_cmds[i].len);
+
+ if (!(batch_num_cmds(batch, b) % 2))
+ b = inject_cmd(b, MI_NOOP, 1);
+
+ b = inject_cmd(b, MI_BATCH_BUFFER_END, 1);
+
+ ret = __exec_batch(fd, I915_EXEC_BLT, handle,
+ batch, batch_bytes(batch, b));
+
+ igt_assert_eq(ret, 0);
+ }
+}
+
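+/* A self-referencing MI_BATCH_BUFFER_START without the non-secure bit asks
+ * for a privileged jump, which the parser must refuse with -EACCES.
+ */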
+static void test_bb_secure(const int fd, const uint32_t handle)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[1];
+ struct drm_i915_gem_relocation_entry reloc[1];
+ int ret;
+
+ const uint32_t batch_secure[] = {
+ MI_BATCH_BUFFER_START | 1,
+ 12,
+ 0,
+ MI_NOOP,
+ MI_NOOP,
+ MI_BATCH_BUFFER_END,
+ };
+
+ gem_write(fd, handle, 0, batch_secure, sizeof(batch_secure));
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = handle;
+
+ memset(reloc, 0, sizeof(reloc));
+ reloc[0].offset = 1 * sizeof(uint32_t);
+ reloc[0].target_handle = handle;
+ reloc[0].delta = 4 * sizeof(uint32_t);
+ reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].write_domain = 0;
+ reloc[0].presumed_offset = -1;
+
+ obj[0].relocs_ptr = to_user_pointer(reloc);
+ obj[0].relocation_count = 1;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 1;
+ execbuf.batch_len = sizeof(batch_secure);
+ execbuf.flags = I915_EXEC_BLT;
+
+ ret = __gem_execbuf(fd, &execbuf);
+
+ igt_assert_eq(ret, -EACCES);
+}
+
+#define BB_START_PARAM 0
+#define BB_START_OUT 1
+#define BB_START_CMD 2
+#define BB_START_FAR 3
+
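+/* Exercise MI_BATCH_BUFFER_START jumps within the batch itself:
+ * BB_START_PARAM jumps into the middle of the first MI_STORE_DWORD_IMM
+ * (dword 5) and BB_START_OUT jumps far outside the object, both of which
+ * must be rejected with -EINVAL. BB_START_CMD and BB_START_FAR jump back to
+ * the second store (dword 8), with the jump placed right after the header or
+ * at the very end of the batch; both are expected to execute, after which
+ * the CPU polls the target until both stored values are visible.
+ */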
+static void test_bb_start(const int fd, const uint32_t handle, int test)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[2];
+ struct drm_i915_gem_relocation_entry reloc[3];
+ const uint32_t target_bo = gem_create(fd, 4096);
+ uint32_t *dst;
+ int ret;
+ unsigned int jump_off, footer_pos;
+ const uint32_t batch_header[] = {
+ MI_NOOP,
+ MI_NOOP,
+ MI_NOOP,
+ MI_NOOP,
+ MI_STORE_DWORD_IMM,
+ 0,
+ 0,
+ 1,
+ MI_STORE_DWORD_IMM,
+ 4,
+ 0,
+ 2,
+ MI_COND_BATCH_BUFFER_END | 1,
+ 0,
+ 0,
+ 0
+ };
+ const uint32_t batch_footer[] = {
+ MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | 1,
+ 0,
+ 0,
+ MI_BATCH_BUFFER_END,
+ };
+ uint32_t batch[1024];
+
+ igt_require(gem_can_store_dword(fd, I915_EXEC_BLT));
+
+ memset(batch, 0, sizeof(batch));
+ memcpy(batch, batch_header, sizeof(batch_header));
+
+ switch (test) {
+ case BB_START_PARAM:
+ jump_off = 5 * sizeof(uint32_t);
+ break;
+ case BB_START_CMD:
+ case BB_START_FAR:
+ jump_off = 8 * sizeof(uint32_t);
+ break;
+ default:
+ jump_off = 0xf00d0000;
+ }
+
+ if (test == BB_START_FAR)
+ footer_pos = (sizeof(batch) - sizeof(batch_footer));
+ else
+ footer_pos = sizeof(batch_header);
+
+ memcpy(batch + footer_pos / sizeof(uint32_t),
+ batch_footer, sizeof(batch_footer));
+ gem_write(fd, handle, 0, batch, sizeof(batch));
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = target_bo;
+ obj[1].handle = handle;
+
+ memset(reloc, 0, sizeof(reloc));
+ reloc[0].offset = 5 * sizeof(uint32_t);
+ reloc[0].target_handle = obj[0].handle;
+ reloc[0].delta = 0;
+ reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].write_domain = I915_GEM_DOMAIN_COMMAND;
+ reloc[0].presumed_offset = -1;
+
+ reloc[1].offset = 9 * sizeof(uint32_t);
+ reloc[1].target_handle = obj[0].handle;
+ reloc[1].delta = 1 * sizeof(uint32_t);
+ reloc[1].read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc[1].write_domain = I915_GEM_DOMAIN_COMMAND;
+ reloc[1].presumed_offset = -1;
+
+ reloc[2].offset = footer_pos + 1 * sizeof(uint32_t);
+ reloc[2].target_handle = obj[1].handle;
+ reloc[2].delta = jump_off;
+ reloc[2].read_domains = I915_GEM_DOMAIN_COMMAND;
+ reloc[2].write_domain = 0;
+ reloc[2].presumed_offset = -1;
+
+ obj[1].relocs_ptr = to_user_pointer(reloc);
+ obj[1].relocation_count = 3;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 2;
+ execbuf.batch_len = sizeof(batch);
+ execbuf.flags = I915_EXEC_BLT;
+
+ dst = gem_mmap__cpu(fd, obj[0].handle, 0, 4096,
+ PROT_READ | PROT_WRITE);
+
+ igt_assert_eq(dst[0], 0);
+ igt_assert_eq(dst[1], 0);
+
+ ret = __gem_execbuf(fd, &execbuf);
+
+ switch (test) {
+ case BB_START_PARAM:
+ igt_assert_eq(ret, -EINVAL);
+ break;
+ case BB_START_CMD:
+ case BB_START_FAR:
+ igt_assert_eq(ret, 0);
+
+ while (READ_ONCE(dst[0]) == 0)
+ ;
+
+ while (READ_ONCE(dst[1]) == 0)
+ ;
+
+ igt_assert_eq(dst[0], 1);
+ igt_assert_eq(dst[1], 2);
+
+ igt_info("values now %x %x\n", dst[0], dst[1]);
+
+ dst[0] = 0;
+
+ igt_info("values now %x %x\n", dst[0], dst[1]);
+
+ igt_assert_eq(dst[0], 0);
+ igt_assert_eq(dst[1], 2);
+
+ break;
+
+ case BB_START_OUT:
+ igt_assert_eq(ret, -EINVAL);
+ break;
+ }
+
+ gem_munmap(dst, 4096);
+ gem_close(fd, target_bo);
+}
+
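+/* A chained (second-level) batch is expected to run to completion on the
+ * render engine (the store lands), but to be blocked on the parsed blitter
+ * so the target stays untouched.
+ */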
+static void test_bb_chained(const int fd, const uint32_t handle)
+{
+ const uint32_t batch[] = {
+ (0x20 << 23) | 2, /* MI_STORE_DATA_IMM */
+ 0,
+ 0,
+ 0xbaadf00d,
+ MI_NOOP,
+ MI_BATCH_BUFFER_END,
+ };
+
+ exec_batch_chained(fd, I915_EXEC_RENDER,
+ handle,
+ batch, sizeof(batch),
+ 4,
+ 0xbaadf00d,
+ 0);
+
+ exec_batch_chained(fd, I915_EXEC_BLT,
+ handle,
+ batch, sizeof(batch),
+ 4,
+ 0xbaadf00d,
+ -EPERM);
+}
+
+static void test_cmd_crossing_page(const int fd, const uint32_t handle)
+{
+ const uint32_t lri_ok[] = {
+ MI_LOAD_REGISTER_IMM,
+ BCS_GPR(0),
+ 0xbaadf00d,
+ MI_BATCH_BUFFER_END,
+ };
+ const uint32_t store_reg[] = {
+ MI_STORE_REGISTER_MEM | (4 - 2),
+ BCS_GPR(0),
+ 0, /* reloc */
+ 0, /* reloc */
+ MI_NOOP,
+ MI_BATCH_BUFFER_END,
+ };
+
+ exec_split_batch(fd, I915_EXEC_BLT,
+ lri_ok, sizeof(lri_ok),
+ 0);
+
+ exec_batch_patched(fd, I915_EXEC_BLT, handle,
+ store_reg, sizeof(store_reg),
+ 2 * sizeof(uint32_t), /* reloc */
+ 0xbaadf00d);
+}
+
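+/* Play games with batch_len: zero and over-long lengths must not end up
+ * loading the bad value into BCS_GPR(0), and writing past the end of the
+ * object must fail with -EINVAL.
+ */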
+static void test_invalid_length(const int fd, const uint32_t handle)
+{
+ const uint32_t ok_val = 0xbaadf00d;
+ const uint32_t bad_val = 0xf00dbaad;
+ const uint32_t noops[8192] = { 0, };
+
+ const uint32_t lri_ok[] = {
+ MI_LOAD_REGISTER_IMM,
+ BCS_GPR(0),
+ ok_val,
+ MI_BATCH_BUFFER_END,
+ };
+
+ const uint32_t lri_bad[] = {
+ MI_LOAD_REGISTER_IMM,
+ BCS_GPR(0),
+ bad_val,
+ MI_BATCH_BUFFER_END,
+ };
+
+ const uint32_t store_reg[] = {
+ MI_STORE_REGISTER_MEM | (4 - 2),
+ BCS_GPR(0),
+ 0, /* reloc */
+ 0, /* reloc */
+ MI_NOOP,
+ MI_BATCH_BUFFER_END,
+ };
+
+ exec_batch(fd, I915_EXEC_BLT, handle,
+ lri_ok, sizeof(lri_ok),
+ 0);
+
+ exec_batch_patched(fd, I915_EXEC_BLT, handle,
+ store_reg, sizeof(store_reg),
+ 2 * sizeof(uint32_t), /* reloc */
+ ok_val);
+
+ exec_batch(fd, I915_EXEC_BLT, handle,
+ lri_bad, 0,
+ 0);
+
+ exec_batch_patched(fd, I915_EXEC_BLT, handle,
+ store_reg, sizeof(store_reg),
+ 2 * sizeof(uint32_t), /* reloc */
+ ok_val);
+
+ exec_batch(fd, I915_EXEC_BLT, handle,
+ lri_ok, 4096,
+ 0);
+
+ igt_assert_eq(__gem_write(fd, handle, 0, noops, 4097), -EINVAL);
+}
+
+struct reg {
+ uint32_t addr;
+ uint32_t mask;
+ bool masked_write;
+ bool privileged;
+};
+
+#define REG_M(ADDR, MASK, WM, P) { (ADDR), (MASK), (WM), (P) }
+#define REG(ADDR) REG_M(ADDR, 0xffffffff, false, false)
+#define REG_P(ADDR) REG_M(ADDR, 0xffffffff, false, true)
+
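+/* Register probe list: the whitelisted BCS_SWCTRL (masked writes) and
+ * GPR0-15, followed by addresses the parser must refuse as privileged:
+ * unrelated registers and accesses straddling the whitelist boundaries.
+ */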
+static const struct reg regs[] = {
+ REG_M(BCS_SWCTRL, 0x3, true, false),
+ REG(BCS_GPR(0)),
+ REG(BCS_GPR_UDW(0)),
+ REG(BCS_GPR(1)),
+ REG(BCS_GPR_UDW(1)),
+ REG(BCS_GPR(2)),
+ REG(BCS_GPR_UDW(2)),
+ REG(BCS_GPR(3)),
+ REG(BCS_GPR_UDW(3)),
+ REG(BCS_GPR(4)),
+ REG(BCS_GPR_UDW(4)),
+ REG(BCS_GPR(5)),
+ REG(BCS_GPR_UDW(5)),
+ REG(BCS_GPR(6)),
+ REG(BCS_GPR_UDW(6)),
+ REG(BCS_GPR(7)),
+ REG(BCS_GPR_UDW(7)),
+ REG(BCS_GPR(8)),
+ REG(BCS_GPR_UDW(8)),
+ REG(BCS_GPR(9)),
+ REG(BCS_GPR_UDW(9)),
+ REG(BCS_GPR(10)),
+ REG(BCS_GPR_UDW(10)),
+ REG(BCS_GPR(11)),
+ REG(BCS_GPR_UDW(11)),
+ REG(BCS_GPR(12)),
+ REG(BCS_GPR_UDW(12)),
+ REG(BCS_GPR(13)),
+ REG(BCS_GPR_UDW(13)),
+ REG(BCS_GPR(14)),
+ REG(BCS_GPR_UDW(14)),
+ REG(BCS_GPR(15)),
+ REG(BCS_GPR_UDW(15)),
+
+ REG_P(0),
+ REG_P(200000),
+
+ REG_P(BCS_SWCTRL - 1),
+ REG_P(BCS_SWCTRL - 2),
+ REG_P(BCS_SWCTRL - 3),
+ REG_P(BCS_SWCTRL - 4),
+ REG_P(BCS_SWCTRL + 4),
+
+ REG_P(BCS_GPR(0) - 1),
+ REG_P(BCS_GPR(0) - 2),
+ REG_P(BCS_GPR(0) - 3),
+ REG_P(BCS_GPR(0) - 4),
+ REG_P(BCS_GPR_UDW(15) + 4),
+};
+
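+/* For a whitelisted register, load a value with MI_LOAD_REGISTER_IMM and
+ * read it back with MI_STORE_REGISTER_MEM; for a privileged one expect
+ * -EACCES on both. Masked registers take the mask in the upper 16 bits.
+ */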
+static void test_register(const int fd, const uint32_t handle,
+ const struct reg *r)
+{
+ const uint32_t lri_zero[] = {
+ MI_LOAD_REGISTER_IMM,
+ r->addr,
+ r->masked_write ? 0xffff0000 : 0,
+ MI_BATCH_BUFFER_END,
+ };
+
+ const uint32_t lri_mask[] = {
+ MI_LOAD_REGISTER_IMM,
+ r->addr,
+ r->masked_write ? (r->mask << 16) | r->mask : r->mask,
+ MI_BATCH_BUFFER_END,
+ };
+
+ const uint32_t store_reg[] = {
+ MI_STORE_REGISTER_MEM | (4 - 2),
+ r->addr,
+ 0, /* reloc */
+ 0, /* reloc */
+ MI_NOOP,
+ MI_BATCH_BUFFER_END,
+ };
+
+ exec_batch(fd, I915_EXEC_BLT, handle,
+ lri_mask, sizeof(lri_mask),
+ r->privileged ? -EACCES : 0);
+
+ exec_batch_patched(fd, I915_EXEC_BLT, handle,
+ store_reg, sizeof(store_reg),
+ 2 * sizeof(uint32_t), /* reloc */
+ r->privileged ? -EACCES : r->mask);
+
+ exec_batch(fd, I915_EXEC_BLT, handle,
+ lri_zero, sizeof(lri_zero),
+ r->privileged ? -EACCES : 0);
+
+ exec_batch_patched(fd, I915_EXEC_BLT, handle,
+ store_reg, sizeof(store_reg),
+ 2 * sizeof(uint32_t), /* reloc */
+ r->privileged ? -EACCES : 0);
+}
+
+static void test_valid_registers(const int fd, const uint32_t handle)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ test_register(fd, handle, &regs[i]);
+}
+
+static long int read_reg(const int fd, const uint32_t handle,
+ const uint32_t addr)
+{
+ const uint32_t store_reg[] = {
+ MI_STORE_REGISTER_MEM | (4 - 2),
+ addr,
+ 0, /* reloc */
+ 0, /* reloc */
+ MI_NOOP,
+ MI_BATCH_BUFFER_END,
+ };
+ uint32_t target_bo;
+ uint32_t value;
+ long int ret;
+
+ target_bo = gem_create(fd, HANDLE_SIZE);
+
+ ret = __exec_batch_patched(fd, I915_EXEC_BLT, handle,
+ store_reg, sizeof(store_reg),
+ target_bo, 2 * sizeof(uint32_t), 0);
+
+ if (ret) {
+ igt_assert_lt(ret, 0);
+ gem_close(fd, target_bo);
+ return ret;
+ }
+
+ gem_read(fd, target_bo, 0, &value, sizeof(value));
+
+ gem_close(fd, target_bo);
+
+ return value;
+}
+
+static int write_reg(const int fd, const uint32_t handle,
+ const uint32_t addr, const uint32_t val)
+{
+ const uint32_t lri[] = {
+ MI_LOAD_REGISTER_IMM,
+ addr,
+ val,
+ MI_BATCH_BUFFER_END,
+ };
+
+ return __exec_batch(fd, I915_EXEC_BLT, handle,
+ lri, sizeof(lri));
+}
+
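+/* LRI/SRM at byte-unaligned GPR offsets are accepted and are expected to hit
+ * the aligned register underneath without corrupting its neighbours.
+ */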
+static void test_unaligned_access(const int fd, const uint32_t handle)
+{
+ const uint32_t addr = BCS_GPR(4);
+ const uint32_t val = 0xbaadfead;
+ const uint32_t pre = 0x12345678;
+ const uint32_t post = 0x87654321;
+
+ igt_assert_eq(write_reg(fd, handle, addr - 4, pre), 0);
+ igt_assert_eq(write_reg(fd, handle, addr, val), 0);
+ igt_assert_eq(write_reg(fd, handle, addr + 4, post), 0);
+
+ igt_assert_eq(read_reg(fd, handle, addr - 4), pre);
+ igt_assert_eq(read_reg(fd, handle, addr), val);
+ igt_assert_eq(read_reg(fd, handle, addr + 4), post);
+
+ for (int i = 0; i < 4; i++) {
+ igt_assert_eq(write_reg(fd, handle, addr + i, val), 0);
+ igt_assert_eq(read_reg(fd, handle, addr), val);
+
+ igt_assert_eq(read_reg(fd, handle, addr + 1), val);
+ igt_assert_eq(read_reg(fd, handle, addr + 2), val);
+ igt_assert_eq(read_reg(fd, handle, addr + 3), val);
+ igt_assert_eq(read_reg(fd, handle, addr + 4), post);
+ igt_assert_eq(read_reg(fd, handle, addr - 3), pre);
+ igt_assert_eq(read_reg(fd, handle, addr - 2), pre);
+ igt_assert_eq(read_reg(fd, handle, addr - 1), pre);
+ }
+}
+
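+/* An instruction from a reserved client and an MI_SET_CONTEXT with a bogus
+ * length must both be refused with -EINVAL on a parsed engine.
+ */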
+static void test_reject_on_engine(int fd, uint32_t handle, unsigned int engine)
+{
+ const uint32_t invalid_cmd[] = {
+ INSTR_INVALID_CLIENT << INSTR_CLIENT_SHIFT,
+ MI_BATCH_BUFFER_END,
+ };
+ const uint32_t invalid_set_context[] = {
+ MI_SET_CONTEXT | 32, /* invalid length */
+ MI_BATCH_BUFFER_END,
+ };
+
+ exec_batch(fd, engine, handle,
+ invalid_cmd, sizeof(invalid_cmd),
+ -EINVAL);
+
+ exec_batch(fd, engine, handle,
+ invalid_set_context, sizeof(invalid_set_context),
+ -EINVAL);
+}
+
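+/* Check rejection both on a plain I915_EXEC_BLT submission and, with
+ * ctx_param set, through a context whose engine map points every ring index
+ * at a copy engine so that each index lands on the parsed blitter.
+ */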
+static void test_rejected(int fd, uint32_t handle, bool ctx_param)
+{
+#define engine_class(e, n) ((e)->engines[(n)].engine_class)
+#define engine_instance(e, n) ((e)->engines[(n)].engine_instance)
+
+ if (ctx_param) {
+ int i;
+
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = 0,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+
+ memset(&engines, 0, sizeof(engines));
+ for (i = 0; i <= I915_EXEC_RING_MASK; i++) {
+ engine_class(&engines, i) = I915_ENGINE_CLASS_COPY;
+ engine_instance(&engines, i) = 0;
+ }
+ gem_context_set_param(fd, &param);
+
+ for (i = 0; i <= I915_EXEC_RING_MASK; i++)
+ test_reject_on_engine(fd, handle, i);
+
+ param.size = 0;
+ gem_context_set_param(fd, &param);
+ } else {
+ test_reject_on_engine(fd, handle, I915_EXEC_BLT);
+ }
+}
+
+igt_main
+{
+ uint32_t handle;
+ int fd;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_INTEL);
+ igt_require_gem(fd);
+ gem_require_blitter(fd);
+
+ parser_version = command_parser_version(fd);
+ /* igt_require(parser_version == 10); */
+
+ igt_require(gem_uses_ppgtt(fd));
+ igt_require(gem_has_blt(fd));
+ igt_require(intel_gen(intel_get_drm_devid(fd)) == 9);
+
+ handle = gem_create(fd, HANDLE_SIZE);
+
+ igt_fork_hang_detector(fd);
+ }
+
+ igt_subtest("secure-batches")
+ test_secure_batches(fd);
+
+ igt_subtest("allowed-all")
+ test_allowed_all(fd, handle);
+
+ igt_subtest("allowed-single")
+ test_allowed_single(fd, handle);
+
+ igt_subtest("bb-start-param")
+ test_bb_start(fd, handle, BB_START_PARAM);
+
+ igt_subtest("bb-start-out")
+ test_bb_start(fd, handle, BB_START_OUT);
+
+ igt_subtest("bb-secure")
+ test_bb_secure(fd, handle);
+
+ igt_subtest("bb-chained")
+ test_bb_chained(fd, handle);
+
+ igt_subtest("cmd-crossing-page")
+ test_cmd_crossing_page(fd, handle);
+
+ igt_subtest("batch-without-end") {
+ const uint32_t noop[1024] = { 0 };
+
+ exec_batch(fd, I915_EXEC_BLT, handle,
+ noop, sizeof(noop),
+ -EINVAL);
+ }
+
+ igt_subtest("batch-zero-length") {
+ const uint32_t noop[] = { 0, MI_BATCH_BUFFER_END };
+
+ exec_batch(fd, I915_EXEC_BLT, handle,
+ noop, 0,
+ -EINVAL);
+ }
+
+ igt_subtest("batch-invalid-length")
+ test_invalid_length(fd, handle);
+
+ igt_subtest("basic-rejected")
+ test_rejected(fd, handle, false);
+
+ igt_subtest("basic-rejected-ctx-param")
+ test_rejected(fd, handle, true);
+
+ igt_subtest("valid-registers")
+ test_valid_registers(fd, handle);
+
+ igt_subtest("unaligned-access")
+ test_unaligned_access(fd, handle);
+
+ igt_subtest_group {
+ igt_hang_t hang;
+
+ igt_fixture hang = igt_allow_hang(fd, 0, 0);
+
+ igt_subtest("bb-start-cmd")
+ test_bb_start(fd, handle, BB_START_CMD);
+
+ igt_subtest("bb-start-far")
+ test_bb_start(fd, handle, BB_START_FAR);
+
+ igt_fixture igt_disallow_hang(fd, hang);
+ }
+
+ igt_fixture {
+ igt_stop_hang_detector();
+ gem_close(fd, handle);
+
+ close(fd);
+ }
+}
diff --git a/tests/intel-ci/fast-feedback.testlist b/tests/intel-ci/fast-feedback.testlist
index dec6fdda..47bad68c 100644
--- a/tests/intel-ci/fast-feedback.testlist
+++ b/tests/intel-ci/fast-feedback.testlist
@@ -204,3 +204,4 @@ igt@i915_module_load@reload
igt@i915_module_load@reload-no-display
igt@i915_module_load@reload-with-fault-injection
igt@i915_pm_rpm@module-reload
+igt@gem_exec_parse_blt@bb-start #expected hang so last
diff --git a/tests/meson.build b/tests/meson.build
index 44bddd02..755fc9e6 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -149,6 +149,7 @@ i915_progs = [
'gem_exec_parallel',
'gem_exec_params',
'gem_exec_parse',
+ 'gem_exec_parse_blt',
'gem_exec_reloc',
'gem_exec_reuse',
'gem_exec_schedule',
--
2.17.1