[Intel-gfx] [PATCH igt v3] igt/gem_ctx_isolation: Check isolation of registers between contexts

Chris Wilson chris at chris-wilson.co.uk
Tue Oct 24 14:21:59 UTC 2017


A new context assumes that all of its registers are in the default state
when it is created. What may happen is that a register written by one
context leaks into a second context, causing mass confusion.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 tests/Makefile.sources    |   1 +
 tests/gem_ctx_isolation.c | 790 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 791 insertions(+)
 create mode 100644 tests/gem_ctx_isolation.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index ac9f90bc..d18b7461 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -57,6 +57,7 @@ TESTS_progs = \
 	gem_ctx_basic \
 	gem_ctx_create \
 	gem_ctx_exec \
+	gem_ctx_isolation \
 	gem_ctx_param \
 	gem_ctx_switch \
 	gem_ctx_thrash \
diff --git a/tests/gem_ctx_isolation.c b/tests/gem_ctx_isolation.c
new file mode 100644
index 00000000..3a0ee19a
--- /dev/null
+++ b/tests/gem_ctx_isolation.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "igt_dummyload.h"
+
+#define MAX_REG 0x40000
+#define NUM_REGS (MAX_REG / sizeof(uint32_t))
+
+#define PAGE_ALIGN(x) ALIGN(x, 4096)
+
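+/*
+ * Subtest flags: DIRTY scribbles values into the first context's registers
+ * before inspecting the second; UNSAFE walks the entire MAX_REG range rather
+ * than the curated safe_registers[] table (platform validation only).
+ */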
+#define DIRTY 0x1
+#define UNSAFE 0x2
+
+enum {
+	RCS_MASK = 0x1,
+	BCS_MASK = 0x2,
+	VCS_MASK = 0x4,
+	VECS_MASK = 0x8,
+};
+
+#define ALL ~0u
+#define GEN_RANGE(x, y) ((ALL >> (32 - (y - x + 1))) << x)
+
+#define LAST_KNOWN_GEN 10
+
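+/*
+ * Registers that each context is expected to keep isolated from every other
+ * context. gen_mask is a bitmask of gens (bit N set for gen N, see
+ * GEN_RANGE) and engine_mask selects the engines on which the register is
+ * checked.
+ */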
+static const struct named_register {
+	const char *name;
+	unsigned int gen_mask;
+	unsigned int engine_mask;
+	uint32_t offset;
+} safe_registers[] = {
+	{ "NOPID", ALL, RCS_MASK, 0x2094 },
+	{ "MI_PREDICATE_RESULT_2", ALL, RCS_MASK, 0x23bc },
+	{ "INSTPM", ALL, RCS_MASK, 0x20c0 },
+	{ "IA_VERTICES_COUNT (low)", ALL, RCS_MASK, 0x2310 },
+	{ "IA_VERTICES_COUNT (high)", ALL, RCS_MASK, 0x2314 },
+	{ "IA_PRIMITIVES_COUNT (low)", ALL, RCS_MASK, 0x2318 },
+	{ "IA_PRIMITIVES_COUNT (high)", ALL, RCS_MASK, 0x231c },
+	{ "VS_INVOCATION_COUNT (low)", ALL, RCS_MASK, 0x2320 },
+	{ "VS_INVOCATION_COUNT (high)", ALL, RCS_MASK, 0x2324 },
+	{ "HS_INVOCATION_COUNT (low)", ALL, RCS_MASK, 0x2300 },
+	{ "HS_INVOCATION_COUNT (high)", ALL, RCS_MASK, 0x2304 },
+	{ "DS_INVOCATION_COUNT (low)", ALL, RCS_MASK, 0x2308 },
+	{ "DS_INVOCATION_COUNT (high)", ALL, RCS_MASK, 0x230c },
+	{ "GS_INVOCATION_COUNT (low)", ALL, RCS_MASK, 0x2328 },
+	{ "GS_INVOCATION_COUNT (high)", ALL, RCS_MASK, 0x232c },
+	{ "GS_PRIMITIVES_COUNT (low)", ALL, RCS_MASK, 0x2330 },
+	{ "GS_PRIMITIVES_COUNT (high)", ALL, RCS_MASK, 0x2334 },
+	{ "CL_INVOCATION_COUNT (low)", ALL, RCS_MASK, 0x2338 },
+	{ "CL_INVOCATION_COUNT (high)", ALL, RCS_MASK, 0x233c },
+	{ "CL_PRIMITIVES_COUNT (low)", ALL, RCS_MASK, 0x2340 },
+	{ "CL_PRIMITIVES_COUNT (high)", ALL, RCS_MASK, 0x2344 },
+	{ "PS_INVOCATION_COUNT_0 (low)", ALL, RCS_MASK, 0x22c8 },
+	{ "PS_INVOCATION_COUNT_0 (high)", ALL, RCS_MASK, 0x22cc },
+	{ "PS_DEPTH_COUNT_0 (low)", ALL, RCS_MASK, 0x22d8 },
+	{ "PS_DEPTH_COUNT_0 (high)", ALL, RCS_MASK, 0x22dc },
+	{ "GPUGPU_DISPATCHDIMX", ALL, RCS_MASK, 0x2500 },
+	{ "GPUGPU_DISPATCHDIMY", ALL, RCS_MASK, 0x2504 },
+	{ "GPUGPU_DISPATCHDIMZ", ALL, RCS_MASK, 0x2508 },
+	{ "MI_PREDICATE_SRC0 (low)", ALL, RCS_MASK, 0x2400 },
+	{ "MI_PREDICATE_SRC0 (high)", ALL, RCS_MASK, 0x2404 },
+	{ "MI_PREDICATE_SRC1 (low)", ALL, RCS_MASK, 0x2408 },
+	{ "MI_PREDICATE_SRC1 (high)", ALL, RCS_MASK, 0x240c },
+	{ "MI_PREDICATE_DATA (low)", ALL, RCS_MASK, 0x2410 },
+	{ "MI_PREDICATE_DATA (high)", ALL, RCS_MASK, 0x2414 },
+	{ "MI_PRED_RESULT", ALL, RCS_MASK, 0x2418 },
+	{ "3DPRIM_END_OFFSET", ALL, RCS_MASK, 0x2420 },
+	{ "3DPRIM_START_VERTEX", ALL, RCS_MASK, 0x2430 },
+	{ "3DPRIM_VERTEX_COUNT", ALL, RCS_MASK, 0x2434 },
+	{ "3DPRIM_INSTANCE_COUNT", ALL, RCS_MASK, 0x2438 },
+	{ "3DPRIM_START_INSTANCE", ALL, RCS_MASK, 0x243c },
+	{ "3DPRIM_BASE_VERTEX", ALL, RCS_MASK, 0x2440 },
+	{ "GPGPU_THREADS_DISPATCHED (low)", ALL, RCS_MASK, 0x2290 },
+	{ "GPGPU_THREADS_DISPATCHED (high)", ALL, RCS_MASK, 0x2294 },
+	{ "PS_INVOCATION_COUNT_1 (low)", ALL, RCS_MASK, 0x22f0 },
+	{ "PS_INVOCATION_COUNT_1 (high)", ALL, RCS_MASK, 0x22f4 },
+	{ "PS_DEPTH_COUNT_1 (low)", ALL, RCS_MASK, 0x22f8 },
+	{ "PS_DEPTH_COUNT_1 (high)", ALL, RCS_MASK, 0x22fc },
+	{ "BB_OFFSET", ALL, RCS_MASK, 0x2158 },
+	{ "MI_PREDICATE_RESULT_1", ALL, RCS_MASK, 0x241c },
+	{ "CS_GPR[0]", ALL, RCS_MASK, 0x2600 },
+	{ "CS_GPR[1]", ALL, RCS_MASK, 0x2604 },
+	{ "CS_GPR[2]", ALL, RCS_MASK, 0x2608 },
+	{ "CS_GPR[3]", ALL, RCS_MASK, 0x260c },
+	{ "CS_GPR[4]", ALL, RCS_MASK, 0x2610 },
+	{ "CS_GPR[5]", ALL, RCS_MASK, 0x2614 },
+	{ "CS_GPR[6]", ALL, RCS_MASK, 0x2618 },
+	{ "CS_GPR[7]", ALL, RCS_MASK, 0x261c },
+	{ "CS_GPR[8]", ALL, RCS_MASK, 0x2620 },
+	{ "CS_GPR[9]", ALL, RCS_MASK, 0x2624 },
+	{ "CS_GPR[10]", ALL, RCS_MASK, 0x2628 },
+	{ "CS_GPR[11]", ALL, RCS_MASK, 0x262c },
+	{ "CS_GPR[12]", ALL, RCS_MASK, 0x2630 },
+	{ "CS_GPR[13]", ALL, RCS_MASK, 0x2634 },
+	{ "CS_GPR[14]", ALL, RCS_MASK, 0x2638 },
+	{ "CS_GPR[15]", ALL, RCS_MASK, 0x263c },
+	{ "CS_GPR[16]", ALL, RCS_MASK, 0x2640 },
+	{ "CS_GPR[17]", ALL, RCS_MASK, 0x2644 },
+	{ "CS_GPR[18]", ALL, RCS_MASK, 0x2648 },
+	{ "CS_GPR[19]", ALL, RCS_MASK, 0x264c },
+	{ "CS_GPR[20]", ALL, RCS_MASK, 0x2650 },
+	{ "CS_GPR[21]", ALL, RCS_MASK, 0x2654 },
+	{ "CS_GPR[22]", ALL, RCS_MASK, 0x2658 },
+	{ "CS_GPR[23]", ALL, RCS_MASK, 0x265c },
+	{ "CS_GPR[24]", ALL, RCS_MASK, 0x2660 },
+	{ "CS_GPR[25]", ALL, RCS_MASK, 0x2664 },
+	{ "CS_GPR[26]", ALL, RCS_MASK, 0x2668 },
+	{ "CS_GPR[27]", ALL, RCS_MASK, 0x266c },
+	{ "CS_GPR[28]", ALL, RCS_MASK, 0x2670 },
+	{ "CS_GPR[29]", ALL, RCS_MASK, 0x2674 },
+	{ "CS_GPR[30]", ALL, RCS_MASK, 0x2678 },
+	{ "CS_GPR[31]", ALL, RCS_MASK, 0x267c },
+	{ "OA_CTX_CONTROL", ALL, RCS_MASK, 0x2360 },
+	{ "OACTXID", ALL, RCS_MASK, 0x2364 },
+	{ "PS_INVOCATION_COUNT_2 (log)", ALL, RCS_MASK, 0x2448 },
+	{ "PS_INVOCATION_COUNT_2 (high)", ALL, RCS_MASK, 0x244c },
+	{ "PS_DEPTH_COUNT_2 (log)", ALL, RCS_MASK, 0x2450 },
+	{ "PS_DEPTH_COUNT_2 (high)", ALL, RCS_MASK, 0x245c },
+	{ "Cache_Mode_0", ALL, RCS_MASK, 0x7000 },
+	{ "Cache_Mode_1", ALL, RCS_MASK, 0x7004 },
+	{ "GT_MODE", ALL, RCS_MASK, 0x7008 },
+	{ "L3_Config", ALL, RCS_MASK, 0x7034 },
+	{ "TD_CTL", ALL, RCS_MASK, 0xe400 },
+	{ "TD_CTL2", ALL, RCS_MASK, 0xe404 },
+	{ "SO_NUM_PRIMS_WRITEN0 (low)", ALL, RCS_MASK, 0x5200 },
+	{ "SO_NUM_PRIMS_WRITEN0 (high)", ALL, RCS_MASK, 0x5204 },
+	{ "SO_NUM_PRIMS_WRITEN1 (low)", ALL, RCS_MASK, 0x5208 },
+	{ "SO_NUM_PRIMS_WRITEN1 (high)", ALL, RCS_MASK, 0x520c },
+	{ "SO_NUM_PRIMS_WRITEN2 (low)", ALL, RCS_MASK, 0x5210 },
+	{ "SO_NUM_PRIMS_WRITEN2 (high)", ALL, RCS_MASK, 0x521c },
+	{ "SO_NUM_PRIMS_WRITEN3 (low)", ALL, RCS_MASK, 0x5218 },
+	{ "SO_NUM_PRIMS_WRITEN3 (high)", ALL, RCS_MASK, 0x521c },
+	{ "SO_PRIM_STORAGE_NEEDED0 (low)", ALL, RCS_MASK, 0x5240 },
+	{ "SO_PRIM_STORAGE_NEEDED0 (high)", ALL, RCS_MASK, 0x5244 },
+	{ "SO_PRIM_STORAGE_NEEDED1 (low)", ALL, RCS_MASK, 0x5248 },
+	{ "SO_PRIM_STORAGE_NEEDED1 (high)", ALL, RCS_MASK, 0x524c },
+	{ "SO_PRIM_STORAGE_NEEDED2 (low)", ALL, RCS_MASK, 0x5250 },
+	{ "SO_PRIM_STORAGE_NEEDED2 (high)", ALL, RCS_MASK, 0x5254 },
+	{ "SO_PRIM_STORAGE_NEEDED3 (low)", ALL, RCS_MASK, 0x5258 },
+	{ "SO_PRIM_STORAGE_NEEDED3 (high)", ALL, RCS_MASK, 0x525c },
+	{ "SO_WRITE_OFFSET0", ALL, RCS_MASK, 0x5280 },
+	{ "SO_WRITE_OFFSET1", ALL, RCS_MASK, 0x5284 },
+	{ "SO_WRITE_OFFSET2", ALL, RCS_MASK, 0x5288 },
+	{ "SO_WRITE_OFFSET3", ALL, RCS_MASK, 0x528c },
+	{ "OA_CONTROL", ALL, RCS_MASK, 0x2b00 },
+	{ "PERF_CNT_1_DW0", ALL, RCS_MASK, 0x91b8 },
+	{ "PERF_CNT_1_DW1", ALL, RCS_MASK, 0x91bc },
+	{ "PERF_CNT_2_DW0", ALL, RCS_MASK, 0x91c0 },
+	{ "PERF_CNT_2_DW1", ALL, RCS_MASK, 0x91c4 },
+
+	/* Privileged (enabled by w/a + FORCE_TO_NONPRIV) */
+	{ "CTX_PREEMPT", GEN_RANGE(9, 10), RCS_MASK, 0x2248 },
+	{ "CS_CHICKEN1", GEN_RANGE(9, 10), RCS_MASK, 0x2580 },
+	{ "HDC_CHICKEN1", GEN_RANGE(9, 10), RCS_MASK, 0x7304 },
+	{ "L3SQREG1", ALL, RCS_MASK, 0xb010 },
+
+	{ "BCS_GPR[0]", ALL, BCS_MASK, 0x22600 },
+	{ "BCS_GPR[1]", ALL, BCS_MASK, 0x22604 },
+	{ "BCS_GPR[2]", ALL, BCS_MASK, 0x22608 },
+	{ "BCS_GPR[3]", ALL, BCS_MASK, 0x2260c },
+	{ "BCS_GPR[4]", ALL, BCS_MASK, 0x22610 },
+	{ "BCS_GPR[5]", ALL, BCS_MASK, 0x22614 },
+	{ "BCS_GPR[6]", ALL, BCS_MASK, 0x22618 },
+	{ "BCS_GPR[7]", ALL, BCS_MASK, 0x2261c },
+	{ "BCS_GPR[8]", ALL, BCS_MASK, 0x22620 },
+	{ "BCS_GPR[9]", ALL, BCS_MASK, 0x22624 },
+	{ "BCS_GPR[10]", ALL, BCS_MASK, 0x22628 },
+	{ "BCS_GPR[11]", ALL, BCS_MASK, 0x2262c },
+	{ "BCS_GPR[12]", ALL, BCS_MASK, 0x22630 },
+	{ "BCS_GPR[13]", ALL, BCS_MASK, 0x22634 },
+	{ "BCS_GPR[14]", ALL, BCS_MASK, 0x22638 },
+	{ "BCS_GPR[15]", ALL, BCS_MASK, 0x2263c },
+	{ "BCS_GPR[16]", ALL, BCS_MASK, 0x22640 },
+	{ "BCS_GPR[17]", ALL, BCS_MASK, 0x22644 },
+	{ "BCS_GPR[18]", ALL, BCS_MASK, 0x22648 },
+	{ "BCS_GPR[19]", ALL, BCS_MASK, 0x2264c },
+	{ "BCS_GPR[20]", ALL, BCS_MASK, 0x22650 },
+	{ "BCS_GPR[21]", ALL, BCS_MASK, 0x22654 },
+	{ "BCS_GPR[22]", ALL, BCS_MASK, 0x22658 },
+	{ "BCS_GPR[23]", ALL, BCS_MASK, 0x2265c },
+	{ "BCS_GPR[24]", ALL, BCS_MASK, 0x22660 },
+	{ "BCS_GPR[25]", ALL, BCS_MASK, 0x22664 },
+	{ "BCS_GPR[26]", ALL, BCS_MASK, 0x22668 },
+	{ "BCS_GPR[27]", ALL, BCS_MASK, 0x2266c },
+	{ "BCS_GPR[28]", ALL, BCS_MASK, 0x22670 },
+	{ "BCS_GPR[29]", ALL, BCS_MASK, 0x22674 },
+	{ "BCS_GPR[30]", ALL, BCS_MASK, 0x22678 },
+	{ "BCS_GPR[31]", ALL, BCS_MASK, 0x2267c },
+	{ "BCS_SWCTRL", ALL, BCS_MASK, 0x22200 },
+
+	{ "VCS0_GPR[0]", ALL, VCS_MASK, 0x12600 },
+	{ "VCS0_GPR[1]", ALL, VCS_MASK, 0x12604 },
+	{ "VCS0_GPR[2]", ALL, VCS_MASK, 0x12608 },
+	{ "VCS0_GPR[3]", ALL, VCS_MASK, 0x1260c },
+	{ "VCS0_GPR[4]", ALL, VCS_MASK, 0x12610 },
+	{ "VCS0_GPR[5]", ALL, VCS_MASK, 0x12614 },
+	{ "VCS0_GPR[6]", ALL, VCS_MASK, 0x12618 },
+	{ "VCS0_GPR[7]", ALL, VCS_MASK, 0x1261c },
+	{ "VCS0_GPR[8]", ALL, VCS_MASK, 0x12620 },
+	{ "VCS0_GPR[9]", ALL, VCS_MASK, 0x12624 },
+	{ "VCS0_GPR[10]", ALL, VCS_MASK, 0x12628 },
+	{ "VCS0_GPR[11]", ALL, VCS_MASK, 0x1262c },
+	{ "VCS0_GPR[12]", ALL, VCS_MASK, 0x12630 },
+	{ "VCS0_GPR[13]", ALL, VCS_MASK, 0x12634 },
+	{ "VCS0_GPR[14]", ALL, VCS_MASK, 0x12638 },
+	{ "VCS0_GPR[15]", ALL, VCS_MASK, 0x1263c },
+	{ "VCS0_GPR[16]", ALL, VCS_MASK, 0x22640 },
+	{ "VCS0_GPR[17]", ALL, VCS_MASK, 0x12644 },
+	{ "VCS0_GPR[18]", ALL, VCS_MASK, 0x12648 },
+	{ "VCS0_GPR[19]", ALL, VCS_MASK, 0x1264c },
+	{ "VCS0_GPR[20]", ALL, VCS_MASK, 0x12650 },
+	{ "VCS0_GPR[21]", ALL, VCS_MASK, 0x12654 },
+	{ "VCS0_GPR[22]", ALL, VCS_MASK, 0x12658 },
+	{ "VCS0_GPR[23]", ALL, VCS_MASK, 0x1265c },
+	{ "VCS0_GPR[24]", ALL, VCS_MASK, 0x12660 },
+	{ "VCS0_GPR[25]", ALL, VCS_MASK, 0x12664 },
+	{ "VCS0_GPR[26]", ALL, VCS_MASK, 0x12668 },
+	{ "VCS0_GPR[27]", ALL, VCS_MASK, 0x1266c },
+	{ "VCS0_GPR[28]", ALL, VCS_MASK, 0x12670 },
+	{ "VCS0_GPR[29]", ALL, VCS_MASK, 0x12674 },
+	{ "VCS0_GPR[30]", ALL, VCS_MASK, 0x12678 },
+	{ "VCS0_GPR[31]", ALL, VCS_MASK, 0x1267c },
+
+	//{ "MFC_VBDOX1[]", ALL, VCS_MASK, 0x12800 }, x 64
+	//{ "MFC_VBDOX2[]", ALL, VCS_MASK, 0x1c800 }, x 64
+
+	{ "VCS1_GPR[0]", ALL, VCS_MASK, 0x1c600 },
+	{ "VCS1_GPR[1]", ALL, VCS_MASK, 0x1c604 },
+	{ "VCS1_GPR[2]", ALL, VCS_MASK, 0x1c608 },
+	{ "VCS1_GPR[3]", ALL, VCS_MASK, 0x1c60c },
+	{ "VCS1_GPR[4]", ALL, VCS_MASK, 0x1c610 },
+	{ "VCS1_GPR[5]", ALL, VCS_MASK, 0x1c614 },
+	{ "VCS1_GPR[6]", ALL, VCS_MASK, 0x1c618 },
+	{ "VCS1_GPR[7]", ALL, VCS_MASK, 0x1c61c },
+	{ "VCS1_GPR[8]", ALL, VCS_MASK, 0x1c620 },
+	{ "VCS1_GPR[9]", ALL, VCS_MASK, 0x1c624 },
+	{ "VCS1_GPR[10]", ALL, VCS_MASK, 0x1c628 },
+	{ "VCS1_GPR[11]", ALL, VCS_MASK, 0x1c62c },
+	{ "VCS1_GPR[12]", ALL, VCS_MASK, 0x1c630 },
+	{ "VCS1_GPR[13]", ALL, VCS_MASK, 0x1c634 },
+	{ "VCS1_GPR[14]", ALL, VCS_MASK, 0x1c638 },
+	{ "VCS1_GPR[15]", ALL, VCS_MASK, 0x1c63c },
+	{ "VCS1_GPR[16]", ALL, VCS_MASK, 0x22640 },
+	{ "VCS1_GPR[17]", ALL, VCS_MASK, 0x1c644 },
+	{ "VCS1_GPR[18]", ALL, VCS_MASK, 0x1c648 },
+	{ "VCS1_GPR[19]", ALL, VCS_MASK, 0x1c64c },
+	{ "VCS1_GPR[20]", ALL, VCS_MASK, 0x1c650 },
+	{ "VCS1_GPR[21]", ALL, VCS_MASK, 0x1c654 },
+	{ "VCS1_GPR[22]", ALL, VCS_MASK, 0x1c658 },
+	{ "VCS1_GPR[23]", ALL, VCS_MASK, 0x1c65c },
+	{ "VCS1_GPR[24]", ALL, VCS_MASK, 0x1c660 },
+	{ "VCS1_GPR[25]", ALL, VCS_MASK, 0x1c664 },
+	{ "VCS1_GPR[26]", ALL, VCS_MASK, 0x1c668 },
+	{ "VCS1_GPR[27]", ALL, VCS_MASK, 0x1c66c },
+	{ "VCS1_GPR[28]", ALL, VCS_MASK, 0x1c670 },
+	{ "VCS1_GPR[29]", ALL, VCS_MASK, 0x1c674 },
+	{ "VCS1_GPR[30]", ALL, VCS_MASK, 0x1c678 },
+	{ "VCS1_GPR[31]", ALL, VCS_MASK, 0x1c67c },
+
+	{ "VECS_GPR[0]", ALL, VECS_MASK, 0x1a600 },
+	{ "VECS_GPR[1]", ALL, VECS_MASK, 0x1a604 },
+	{ "VECS_GPR[2]", ALL, VECS_MASK, 0x1a608 },
+	{ "VECS_GPR[3]", ALL, VECS_MASK, 0x1a60c },
+	{ "VECS_GPR[4]", ALL, VECS_MASK, 0x1a610 },
+	{ "VECS_GPR[5]", ALL, VECS_MASK, 0x1a614 },
+	{ "VECS_GPR[6]", ALL, VECS_MASK, 0x1a618 },
+	{ "VECS_GPR[7]", ALL, VECS_MASK, 0x1a61c },
+	{ "VECS_GPR[8]", ALL, VECS_MASK, 0x1a620 },
+	{ "VECS_GPR[9]", ALL, VECS_MASK, 0x1a624 },
+	{ "VECS_GPR[10]", ALL, VECS_MASK, 0x1a628 },
+	{ "VECS_GPR[11]", ALL, VECS_MASK, 0x1a62c },
+	{ "VECS_GPR[12]", ALL, VECS_MASK, 0x1a630 },
+	{ "VECS_GPR[13]", ALL, VECS_MASK, 0x1a634 },
+	{ "VECS_GPR[14]", ALL, VECS_MASK, 0x1a638 },
+	{ "VECS_GPR[15]", ALL, VECS_MASK, 0x1a63c },
+	{ "VECS_GPR[16]", ALL, VECS_MASK, 0x22640 },
+	{ "VECS_GPR[17]", ALL, VECS_MASK, 0x1a644 },
+	{ "VECS_GPR[18]", ALL, VECS_MASK, 0x1a648 },
+	{ "VECS_GPR[19]", ALL, VECS_MASK, 0x1a64c },
+	{ "VECS_GPR[20]", ALL, VECS_MASK, 0x1a650 },
+	{ "VECS_GPR[21]", ALL, VECS_MASK, 0x1a654 },
+	{ "VECS_GPR[22]", ALL, VECS_MASK, 0x1a658 },
+	{ "VECS_GPR[23]", ALL, VECS_MASK, 0x1a65c },
+	{ "VECS_GPR[24]", ALL, VECS_MASK, 0x1a660 },
+	{ "VECS_GPR[25]", ALL, VECS_MASK, 0x1a664 },
+	{ "VECS_GPR[26]", ALL, VECS_MASK, 0x1a668 },
+	{ "VECS_GPR[27]", ALL, VECS_MASK, 0x1a66c },
+	{ "VECS_GPR[28]", ALL, VECS_MASK, 0x1a670 },
+	{ "VECS_GPR[29]", ALL, VECS_MASK, 0x1a674 },
+	{ "VECS_GPR[30]", ALL, VECS_MASK, 0x1a678 },
+	{ "VECS_GPR[31]", ALL, VECS_MASK, 0x1a67c },
+
+	{}
+}, ignore_registers[] = {
+	{ "RCS timestamp", ALL, RCS_MASK, 0x2358 },
+	{ "VCS0 timestamp", ALL, VCS_MASK, 0x12358 },
+	{ "VCS1 timestamp", ALL, VCS_MASK, 0x1c358 },
+	{ "BCS timestamp", ALL, BCS_MASK, 0x22358 },
+	{ "VECS timestamp", ALL, VECS_MASK, 0x1a358 },
+	{}
+};
+
+static const char *register_name(uint32_t offset)
+{
+	for (const struct named_register *r = safe_registers; r->name; r++) {
+		if (r->offset == offset)
+			return r->name;
+	}
+
+	return "unknown";
+}
+
+static bool ignore_register(uint32_t offset)
+{
+	for (const struct named_register *r = ignore_registers; r->name; r++) {
+		if (r->offset == offset)
+			return true;
+	}
+
+	return false;
+}
+
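+/*
+ * Snapshot the registers for the given context/engine: emit one
+ * MI_STORE_REGISTER_MEM per register (every dword offset when UNSAFE) into a
+ * results buffer and return the handle of that buffer.
+ */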
+static uint32_t read_regs(int fd,
+			  uint32_t ctx, unsigned int engine,
+			  unsigned int flags)
+{
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_relocation_entry *reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	unsigned int regs_size, batch_size;
+	unsigned int engine_bit, gen_bit;
+	uint32_t *batch, *b;
+
+	switch (engine & 0x3f) { /* I915_EXEC_RING_MASK */
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		engine_bit = RCS_MASK;
+		break;
+	case I915_EXEC_BLT:
+		engine_bit = BCS_MASK;
+		break;
+	case I915_EXEC_BSD:
+		engine_bit = VCS_MASK;
+		break;
+	case I915_EXEC_VEBOX:
+		engine_bit = VECS_MASK;
+		break;
+	default:
+		igt_assert(0);
+	}
+	gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
+
+	reloc = calloc(NUM_REGS, sizeof(*reloc));
+	igt_assert(reloc);
+
+	regs_size = NUM_REGS * sizeof(uint32_t);
+	regs_size = PAGE_ALIGN(regs_size);
+
+	batch_size = NUM_REGS * 4 * sizeof(uint32_t) + 4;
+	batch_size = PAGE_ALIGN(batch_size);
+
+	memset(obj, 0, sizeof(obj));
+	obj[0].handle = gem_create(fd, regs_size);
+	obj[1].handle = gem_create(fd, batch_size);
+	obj[1].relocs_ptr = to_user_pointer(reloc);
+
+	b = batch = gem_mmap__cpu(fd, obj[1].handle, 0, batch_size, PROT_WRITE);
+	gem_set_domain(fd, obj[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	if (flags & UNSAFE) {
+		for (unsigned int n = 0; n < NUM_REGS; n++) {
+			*b++ = 0x24 << 23 | 2; /* SRM */
+			*b++ = n * sizeof(uint32_t);
+			reloc[n].target_handle = obj[0].handle;
+			reloc[n].presumed_offset = 0;
+			reloc[n].offset = (b - batch) * sizeof(*b);
+			reloc[n].delta = sizeof(uint32_t) * n;
+			reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
+			reloc[n].write_domain = I915_GEM_DOMAIN_RENDER;
+			*b++ = reloc[n].delta;
+			*b++ = 0;
+		}
+		obj[1].relocation_count = NUM_REGS;
+	} else {
+		unsigned int n = 0;
+
+		for (const struct named_register *r = safe_registers;
+		     r->name; r++) {
+			if (!(r->engine_mask & engine_bit))
+				continue;
+			if (!(r->gen_mask & gen_bit))
+				continue;
+
+			*b++ = 0x24 << 23 | 2; /* SRM */
+			*b++ = r->offset;
+			reloc[n].target_handle = obj[0].handle;
+			reloc[n].presumed_offset = 0;
+			reloc[n].offset = (b - batch) * sizeof(*b);
+			reloc[n].delta = r->offset;
+			reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
+			reloc[n].write_domain = I915_GEM_DOMAIN_RENDER;
+			*b++ = reloc[n].delta;
+			*b++ = 0;
+
+			n++;
+		}
+
+		obj[1].relocation_count = n;
+	}
+	*b++ = MI_BATCH_BUFFER_END;
+	munmap(batch, batch_size);
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(obj);
+	execbuf.buffer_count = 2;
+	execbuf.flags = engine;
+	execbuf.rsvd1 = ctx;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, obj[1].handle);
+	free(reloc);
+
+	return obj[0].handle;
+}
+
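+/*
+ * Scribble over the registers for the given context/engine: emit one
+ * MI_LOAD_REGISTER_IMM per register, loading the same value into each.
+ */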
+static void write_regs(int fd,
+		       uint32_t ctx, unsigned int engine,
+		       unsigned int flags,
+		       uint32_t value)
+{
+	struct drm_i915_gem_exec_object2 obj;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	unsigned int engine_bit, gen_bit;
+	unsigned int batch_size;
+	uint32_t *batch, *b;
+
+	switch (engine & 0x3f) { /* I915_EXEC_RING_MASK */
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		engine_bit = RCS_MASK;
+		break;
+	case I915_EXEC_BLT:
+		engine_bit = BCS_MASK;
+		break;
+	case I915_EXEC_BSD:
+		engine_bit = VCS_MASK;
+		break;
+	case I915_EXEC_VEBOX:
+		engine_bit = VECS_MASK;
+		break;
+	default:
+		igt_assert(0);
+	}
+	gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
+
+	batch_size = NUM_REGS * 3 * sizeof(uint32_t) + 4;
+	batch_size = PAGE_ALIGN(batch_size);
+
+	memset(&obj, 0, sizeof(obj));
+	obj.handle = gem_create(fd, batch_size);
+
+	b = batch = gem_mmap__cpu(fd, obj.handle, 0, batch_size, PROT_WRITE);
+	gem_set_domain(fd, obj.handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	if (flags & UNSAFE) {
+		for (unsigned int n = 0; n < NUM_REGS; n++) {
+			*b++ = 0x22 << 23 | 1; /* LRI */
+			*b++ = n * sizeof(uint32_t);
+			*b++ = value;
+		}
+	} else {
+		for (const struct named_register *r = safe_registers;
+		     r->name; r++) {
+			if (!(r->engine_mask & engine_bit))
+				continue;
+			if (!(r->gen_mask & gen_bit))
+				continue;
+			*b++ = 0x22 << 23 | 1; /* LRI */
+			*b++ = r->offset;
+			*b++ = value;
+		}
+	}
+	*b++ = MI_BATCH_BUFFER_END;
+	munmap(batch, batch_size);
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(&obj);
+	execbuf.buffer_count = 1;
+	execbuf.flags = engine;
+	execbuf.rsvd1 = ctx;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, obj.handle);
+}
+
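+/*
+ * Reload the registers from a snapshot taken by read_regs(): emit one
+ * MI_LOAD_REGISTER_MEM per register, undoing any values planted by
+ * write_regs().
+ */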
+static void restore_regs(int fd,
+			 uint32_t ctx, unsigned int engine,
+			 unsigned int flags,
+			 uint32_t regs)
+{
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_relocation_entry *reloc;
+	unsigned int engine_bit, gen_bit;
+	unsigned int batch_size;
+	uint32_t *batch, *b;
+
+	switch (engine & 0x3f) { /* I915_EXEC_RING_MASK */
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		engine_bit = RCS_MASK;
+		break;
+	case I915_EXEC_BLT:
+		engine_bit = BCS_MASK;
+		break;
+	case I915_EXEC_BSD:
+		engine_bit = VCS_MASK;
+		break;
+	case I915_EXEC_VEBOX:
+		engine_bit = VECS_MASK;
+		break;
+	default:
+		igt_assert(0);
+	}
+	gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
+
+	reloc = calloc(NUM_REGS, sizeof(*reloc));
+	igt_assert(reloc);
+
+	batch_size = NUM_REGS * 4 * sizeof(uint32_t) + 4;
+	batch_size = PAGE_ALIGN(batch_size);
+
+	memset(obj, 0, sizeof(obj));
+	obj[0].handle = regs;
+	obj[1].handle = gem_create(fd, batch_size);
+	obj[1].relocs_ptr = to_user_pointer(reloc);
+
+	b = batch = gem_mmap__cpu(fd, obj[1].handle, 0, batch_size, PROT_WRITE);
+	gem_set_domain(fd, obj[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	if (flags & UNSAFE) {
+		for (unsigned int n = 0; n < NUM_REGS; n++) {
+			*b++ = 0x29 << 23 | 2; /* LRM */
+			*b++ = n * sizeof(uint32_t);
+			reloc[n].target_handle = obj[0].handle;
+			reloc[n].presumed_offset = 0;
+			reloc[n].offset = (b - batch) * sizeof(*b);
+			reloc[n].delta = n * sizeof(uint32_t);
+			reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
+			reloc[n].write_domain = 0;
+			*b++ = reloc[n].delta;
+			*b++ = 0;
+		}
+
+		obj[1].relocation_count = NUM_REGS;
+	} else {
+		unsigned int n = 0;
+
+		for (const struct named_register *r = safe_registers;
+		     r->name; r++) {
+			if (!(r->engine_mask & engine_bit))
+				continue;
+			if (!(r->gen_mask & gen_bit))
+				continue;
+
+			*b++ = 0x29 << 23 | 2; /* LRM */
+			*b++ = r->offset;
+			reloc[n].target_handle = obj[0].handle;
+			reloc[n].presumed_offset = 0;
+			reloc[n].offset = (b - batch) * sizeof(*b);
+			reloc[n].delta = r->offset;
+			reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
+			reloc[n].write_domain = 0;
+			*b++ = reloc[n].delta;
+			*b++ = 0;
+
+			n++;
+		}
+
+		obj[1].relocation_count = n;
+	}
+	*b++ = MI_BATCH_BUFFER_END;
+	munmap(batch, batch_size);
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(obj);
+	execbuf.buffer_count = 2;
+	execbuf.flags = engine;
+	execbuf.rsvd1 = ctx;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, obj[1].handle);
+}
+
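+/* Debug helper: print every captured register value from a snapshot. */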
+__attribute__((unused))
+static void dump_regs(int fd, unsigned int engine, unsigned int regs)
+{
+	unsigned int engine_bit, gen_bit;
+	unsigned int regs_size;
+	uint32_t *out;
+
+	switch (engine & 0x3f) { /* I915_EXEC_RING_MASK */
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		engine_bit = RCS_MASK;
+		break;
+	case I915_EXEC_BLT:
+		engine_bit = BCS_MASK;
+		break;
+	case I915_EXEC_BSD:
+		engine_bit = VCS_MASK;
+		break;
+	case I915_EXEC_VEBOX:
+		engine_bit = VECS_MASK;
+		break;
+	default:
+		igt_assert(0);
+	}
+	gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
+
+	regs_size = NUM_REGS * sizeof(uint32_t);
+	regs_size = PAGE_ALIGN(regs_size);
+
+	out = gem_mmap__cpu(fd, regs, 0, regs_size, PROT_READ);
+	gem_set_domain(fd, regs, I915_GEM_DOMAIN_CPU, 0);
+
+	for (const struct named_register *r = safe_registers; r->name; r++) {
+		if (!(r->engine_mask & engine_bit))
+			continue;
+		if (!(r->gen_mask & gen_bit))
+			continue;
+
+		igt_debug("0x%04x [%s]: 0x%08x\n",
+			  r->offset, r->name, out[r->offset/4]);
+	}
+	munmap(out, regs_size);
+}
+
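+/*
+ * Compare two register snapshots dword by dword, warning about any
+ * difference that is not on the ignore list (e.g. timestamps), and assert
+ * that no mismatches were found.
+ */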
+static void compare_regs(int fd, uint32_t A, uint32_t B, const char *who)
+{
+	unsigned int num_errors;
+	unsigned int regs_size;
+	uint32_t *a, *b;
+
+	regs_size = NUM_REGS * sizeof(uint32_t);
+	regs_size = PAGE_ALIGN(regs_size);
+
+	a = gem_mmap__cpu(fd, A, 0, regs_size, PROT_READ);
+	gem_set_domain(fd, A, I915_GEM_DOMAIN_CPU, 0);
+
+	b = gem_mmap__cpu(fd, B, 0, regs_size, PROT_READ);
+	gem_set_domain(fd, B, I915_GEM_DOMAIN_CPU, 0);
+
+	num_errors = 0;
+	for (unsigned int n = 0; n < NUM_REGS; n++) {
+		uint32_t offset = n * sizeof(uint32_t);
+		if (a[n] != b[n] && !ignore_register(offset)) {
+			igt_warn("Register 0x%04x [%s]: A=%08x B=%08x\n",
+				 offset, register_name(offset), a[n], b[n]);
+			num_errors++;
+		}
+	}
+	munmap(b, regs_size);
+	munmap(a, regs_size);
+
+	igt_assert_f(num_errors == 0,
+		     "%d registers mistached between %s.",
+		     num_errors, who);
+}
+
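+/*
+ * Snapshot the registers of a fresh context and, if DIRTY, scribble over
+ * them while a spinning batch keeps the engine busy. Then create a second
+ * context and check that it only ever observes the default register values.
+ */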
+static void isolation(int fd, unsigned int engine, unsigned int flags)
+{
+	static const uint32_t values[] = {
+		0x0,
+		0xffffffff,
+		0xcccccccc,
+		0x33333333,
+		0x55555555,
+		0xaaaaaaaa,
+		0xdeadbeef
+	};
+	unsigned int num_values = flags & DIRTY ? ARRAY_SIZE(values) : 1;
+
+	for (int v = 0; v < num_values; v++) {
+		igt_spin_t *spin = NULL;
+		uint32_t ctx[2];
+		uint32_t regs[2];
+		uint32_t dirty;
+
+		ctx[0] = gem_context_create(fd);
+		regs[0] = read_regs(fd, ctx[0], engine, flags);
+
+		if (flags & DIRTY) {
+			spin = igt_spin_batch_new(fd, ctx[0], engine, 0);
+			write_regs(fd, ctx[0], engine, flags, values[v]);
+		}
+
+		/*
+		 * We create and execute a new context whilst the HW is
+		 * occupied with the previous context (we should switch from
+		 * the old to the new proto-context without idling, as idling
+		 * could load the power context). If all goes well, we only
+		 * see the default values from this context, but if it goes
+		 * badly we see the corruption from the previous context
+		 * instead!
+		 */
+		ctx[1] = gem_context_create(fd);
+		regs[1] = read_regs(fd, ctx[1], engine, flags);
+
+		/*
+		 * Restore the original register values before the HW idles.
+		 * Or else it may never restart!
+		 */
+		dirty = read_regs(fd, ctx[0], engine, flags);
+		restore_regs(fd, ctx[0], engine, flags, regs[0]);
+
+		igt_spin_batch_free(fd, spin);
+
+		if (!(flags & DIRTY))
+			compare_regs(fd, regs[0], dirty, "two reads of the same ctx");
+		compare_regs(fd, regs[0], regs[1], "two virgin contexts");
+
+		for (int n = 0; n < ARRAY_SIZE(ctx); n++) {
+			gem_close(fd, regs[n]);
+			gem_context_destroy(fd, ctx[n]);
+		}
+		gem_close(fd, dirty);
+	}
+}
+
+igt_main
+{
+	const unsigned int platform_validation = 0;
+	int fd = -1;
+
+	igt_fixture {
+		int gen;
+
+		fd = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(fd);
+
+		/* For guaranteed context isolation */
+		igt_require(gem_has_execlists(fd));
+		gem_context_destroy(fd, gem_context_create(fd));
+
+		gen = intel_gen(intel_get_drm_devid(fd));
+		//igt_ci_fail_on(gen > LAST_KNOWN_GEN);
+		igt_skip_on(gen > LAST_KNOWN_GEN);
+	}
+
+	for (const struct intel_execution_engine *e =
+	     intel_execution_engines; e->name; e++) {
+		igt_subtest_group {
+			unsigned int engine = e->exec_id | e->flags;
+			igt_fixture {
+				gem_require_ring(fd, engine);
+			}
+
+			igt_subtest_f("%s-clean", e->name)
+				isolation(fd, engine, 0);
+			igt_subtest_f("%s-dirty", e->name)
+				isolation(fd, engine, DIRTY);
+
+			igt_subtest_f("%s-unsafe", e->name) {
+				igt_require(platform_validation);
+				isolation(fd, engine, DIRTY | UNSAFE);
+			}
+		}
+	}
+}
-- 
2.15.0.rc1


