[Intel-gfx] [PATCH] tests: Add gem_copy_align_blt test

Mika Kuoppala mika.kuoppala at linux.intel.com
Thu Jan 29 05:49:07 PST 2015


Copy a block into a destination object with varying dst/src offsets.
Put guard areas before and after the blit target to verify that the
blit did not touch memory outside the blit boundaries.

v2: Test description, git add and gitignore (Thomas)
    Strip it out from gem_userptr (Chris)

References: https://bugs.freedesktop.org/show_bug.cgi?id=79053
Cc: Thomas Wood <thomas.wood at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
---
 tests/.gitignore           |   1 +
 tests/Makefile.sources     |   1 +
 tests/gem_copy_align_blt.c | 225 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 227 insertions(+)
 create mode 100644 tests/gem_copy_align_blt.c

diff --git a/tests/.gitignore b/tests/.gitignore
index 88a6405..d5f2907 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -98,6 +98,7 @@ gem_storedw_loop_vebox
 gem_stress
 gem_threaded_access_tiled
 gem_tiled_blits
+gem_copy_align_blt
 gem_tiled_fence_blits
 gem_tiled_partial_pwrite_pread
 gem_tiled_pread
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 74deec3..0b17b8a 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -60,6 +60,7 @@ TESTS_progs_M = \
 	gem_set_tiling_vs_blt \
 	gem_storedw_batches_loop \
 	gem_tiled_blits \
+	gem_copy_align_blt \
 	gem_tiled_partial_pwrite_pread \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
diff --git a/tests/gem_copy_align_blt.c b/tests/gem_copy_align_blt.c
new file mode 100644
index 0000000..6376d70
--- /dev/null
+++ b/tests/gem_copy_align_blt.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *    Mika Kuoppala <mika.kuoppala at intel.com>
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include "i915_drm.h"
+#include "intel_chipset.h"
+#include "intel_reg.h"
+#include "ioctl_wrappers.h"
+#include "drm.h"
+#include "drmtest.h"
+
+IGT_TEST_DESCRIPTION("Verify blt copying with boundary checks");
+
+static int gen = 0;
+
+#define WIDTH 32
+#define HEIGHT 32
+
+static uint32_t linear[WIDTH*HEIGHT];
+
+static void
+copy_align(int fd,
+	   uint32_t dst, uint32_t dst_offset,
+	   uint32_t src, uint32_t src_offset,
+	   uint32_t w, uint32_t h)
+{	/* Blit w*h dwords from src+src_offset to dst+dst_offset (offsets in dwords) */
+	uint32_t batch[12];
+	struct drm_i915_gem_relocation_entry reloc[2];
+	struct drm_i915_gem_exec_object2 obj[3];
+	struct drm_i915_gem_execbuffer2 exec;
+	uint32_t handle;
+	int i=0;
+
+	batch[i++] = XY_SRC_COPY_BLT_CMD |
+		XY_SRC_COPY_BLT_WRITE_ALPHA |
+		XY_SRC_COPY_BLT_WRITE_RGB;
+	if (gen >= 8)
+		batch[i - 1] |= 8; /* longer command: 64-bit src/dst addresses */
+	else
+		batch[i - 1] |= 6; /* 32-bit src/dst addresses */
+
+	batch[i++] = (3 << 24) | /* 32 bits */
+		(0xcc << 16) | /* copy ROP */
+		w * 4; /* dst pitch in bytes; pitch == blit width so rows pack contiguously */
+
+	/* The >= gen8 blitter needs to have dst/src base
+	 * addresses aligned to 4k. So we need to handle the
+	 * offsets with dst/src coordinates */
+	batch[i++] = dst_offset; /* dst x1,y1 (y1 = 0, x1 = dword offset) */
+	batch[i++] = ((h) << 16) | (w + dst_offset); /* dst x2,y2 */
+	batch[i++] = 0; /* dst address, patched in via reloc[0] */
+	if (gen >= 8)
+		batch[i++] = 0; /* dst address high dword */
+	batch[i++] = src_offset; /* src x1,y1 */
+	batch[i++] = w * 4; /* src pitch in bytes */
+
+	batch[i++] = 0; /* src address, patched in via reloc[1] */
+	if (gen >= 8)
+		batch[i++] = 0; /* src address high dword */
+
+	batch[i++] = MI_BATCH_BUFFER_END;
+	batch[i++] = MI_NOOP;
+
+	handle = gem_create(fd, 4096);
+	gem_write(fd, handle, 0, batch, sizeof(batch));
+
+	reloc[0].target_handle = dst;
+	reloc[0].delta = 0;
+	reloc[0].offset = 4 * sizeof(batch[0]); /* dst address is batch dword 4 on all gens */
+	reloc[0].presumed_offset = 0;
+	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
+
+	reloc[1].target_handle = src;
+	reloc[1].delta = 0;
+	reloc[1].offset = 7 * sizeof(batch[0]); /* src address: dword 7, or 8 on gen8+ */
+	if (gen >= 8)
+		reloc[1].offset += sizeof(batch[0]); /* shifted by the 64-bit dst address */
+	reloc[1].presumed_offset = 0;
+	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[1].write_domain = 0; /* src is only read */
+
+	memset(obj, 0, sizeof(obj));
+	exec.buffer_count = 0;
+	obj[exec.buffer_count++].handle = dst;
+	if (src != dst)
+		obj[exec.buffer_count++].handle = src; /* don't list the same bo twice */
+	obj[exec.buffer_count].handle = handle; /* batch buffer goes last */
+	obj[exec.buffer_count].relocation_count = 2;
+	obj[exec.buffer_count].relocs_ptr = (uintptr_t)reloc;
+	exec.buffer_count++;
+	exec.buffers_ptr = (uintptr_t)obj;
+
+	exec.batch_start_offset = 0;
+	exec.batch_len = i * 4;
+	exec.DR1 = exec.DR4 = 0;
+	exec.num_cliprects = 0;
+	exec.cliprects_ptr = 0;
+	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
+	i915_execbuffer2_set_context_id(exec, 0);
+	exec.rsvd2 = 0;
+
+	gem_execbuf(fd, &exec);
+
+	gem_sync(fd, handle); /* wait for the blit to complete */
+	gem_close(fd, handle);
+}
+
+static bool area_ok(uint32_t *p, const int size, const uint32_t sval)
+{	/* true iff p[i] == sval + i for every i in [0, size) */
+	int i;
+
+	for (i = 0; i < size; i++)
+		if (p[i] != i + sval)
+			return false;
+
+	return true;
+}
+
+static void
+_test_destination_align(int fd,
+			const uint32_t dst_align, const uint32_t src_align,
+			const uint32_t w, const uint32_t h)
+{	/* Blit w*h dwords into a guarded dst and verify only [dst_align, dst_align+w*h) changed */
+	const uint32_t size = sizeof(linear) / 4;
+	const uint32_t guard_val = 0xdeadf00d;
+	uint32_t *p;
+	uint32_t h_dst;
+	uint32_t h_src;
+	int i;
+	const int dwords = w * h;
+
+	igt_assert(dwords + dst_align < size); /* blit target plus trailing guard must fit */
+	p = linear;
+
+	h_src = gem_create(fd, size * 4);
+	igt_assert(h_src != 0);
+
+	for (i = 0; i < size; i++)
+		p[i] = i; /* src pattern: ascending dwords */
+
+	gem_write(fd, h_src, 0, p, size * 4);
+	gem_read(fd, h_src, 0, p, size * 4);
+
+	igt_assert(area_ok(p, size, 0)); /* sanity check the write/read round trip */
+
+	h_dst = gem_create(fd, size * 4);
+	igt_assert(h_dst != 0);
+
+	for (i = 0; i < size; i++)
+		p[i] = guard_val + i; /* dst pattern doubles as the guard value */
+
+	gem_write(fd, h_dst, 0, p, size * 4);
+	gem_read(fd, h_dst, 0, p, size * 4);
+
+	igt_assert(area_ok(p, size, guard_val));
+
+	copy_align(fd, h_dst, dst_align, h_src, src_align, w, h);
+	gem_read(fd, h_dst, 0, p, size * 4);
+
+	igt_assert(area_ok(p, dst_align, guard_val)); /* leading guard untouched */
+	igt_assert(area_ok(p + dst_align, dwords, src_align)); /* copied data landed correctly */
+	igt_assert(area_ok(p + dwords + dst_align,
+			   size - dwords - dst_align,
+			   guard_val + dwords + dst_align)); /* trailing guard untouched */
+
+	gem_close(fd, h_src);
+	gem_close(fd, h_dst);
+}
+
+static void test_destination_align(int fd,
+				   int dst_align, int src_align,
+				   int max_w, int max_h)
+{	/* Sweep every src/dst offset and blit width/height combination */
+	int i, j, w, h;
+
+	for (i = 0; i <= src_align; i++)
+		for (j = 0; j <= dst_align; j++)
+			for (h = 1; h <= max_h; h ++)
+				for (w = 1; w <= max_w; w++)
+					_test_destination_align(fd, j, i, w, h);
+}
+
+igt_simple_main
+{
+	int fd = -1;
+
+	igt_skip_on_simulation();
+
+	igt_fixture { /* NOTE(review): igt_fixture is normally for subtest binaries (igt_main); confirm it is valid inside igt_simple_main */
+		fd = drm_open_any();
+		igt_assert(fd >= 0);
+
+		gen = intel_gen(intel_get_drm_devid(fd));
+	}
+
+	test_destination_align(fd, 16, 16, 16, 16);
+}
-- 
1.9.1



More information about the Intel-gfx mailing list