[igt-dev] [PATCH i-g-t v3 11/22] i915/gem_set_tiling_vs_blt.c: Remove libdrm dependency

Dominik Grzegorzek dominik.grzegorzek at intel.com
Fri Sep 18 10:58:45 UTC 2020


Use intel_bb / intel_buf to remove the libdrm dependency. The test
relied on libdrm's buffer-reuse mechanism, so its logic had to be
changed slightly.

Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
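A note on the conversion pattern, for reviewers (illustrative only, not
part of the commit message): drm_intel_bo allocations become intel_buf
objects backed by a buf_ops, and the implicit global batch is replaced
by an explicit intel_bb with hand-emitted relocations. Below is a
minimal sketch, assuming the intel_bb / intel_buf helpers introduced
earlier in this series; example_blit() and its sizes are hypothetical,
modelled on the code in the diff:

#include "igt.h"

/* Hypothetical example: copy one 256x256, 32bpp buffer to another
 * using the intel_bb API instead of libdrm's intel_batchbuffer. */
static void example_blit(int fd)
{
	struct buf_ops *bops = buf_ops_create(fd);
	struct intel_bb *ibb = intel_bb_create(fd, 4096);
	struct intel_buf *src, *dst;

	/* width, height, bpp, alignment, tiling, compression */
	src = intel_buf_create(bops, 256, 256, 32, 4096,
			       I915_TILING_NONE, I915_COMPRESSION_NONE);
	dst = intel_buf_create(bops, 256, 256, 32, 4096,
			       I915_TILING_NONE, I915_COMPRESSION_NONE);

	/* buffers must be registered with the bb (getting an address)
	 * before relocations can reference them */
	intel_bb_add_intel_buf(ibb, dst, true);
	intel_bb_add_intel_buf(ibb, src, false);

	intel_bb_blit_start(ibb, 0);
	intel_bb_out(ibb, (3 << 24) |	/* 32 bits */
		     (0xcc << 16) |	/* copy ROP */
		     256 * 4);		/* dst pitch */
	intel_bb_out(ibb, 0 << 16 | 0);		/* dst x1, y1 */
	intel_bb_out(ibb, 256 << 16 | 256);	/* dst x2, y2 */
	intel_bb_emit_reloc_fenced(ibb, dst->handle,
				   I915_GEM_DOMAIN_RENDER,
				   I915_GEM_DOMAIN_RENDER,
				   0, dst->addr.offset);
	intel_bb_out(ibb, 0 << 16 | 0);		/* src x1, y1 */
	intel_bb_out(ibb, 256 * 4);		/* src pitch */
	intel_bb_emit_reloc_fenced(ibb, src->handle,
				   I915_GEM_DOMAIN_RENDER, 0,
				   0, src->addr.offset);
	intel_bb_flush_blit(ibb);

	intel_buf_destroy(src);
	intel_buf_destroy(dst);
	intel_bb_destroy(ibb);
	buf_ops_destroy(bops);
}
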
 tests/i915/gem_set_tiling_vs_blt.c | 231 +++++++++++++++++--------------
 1 file changed, 124 insertions(+), 107 deletions(-)
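
On the buffer-reuse point from the commit message: the libdrm version
freed and reallocated a bo of the same size and asserted that the bo
cache handed back the same GEM handle, so the object kept its GTT
alignment across the tiling change. Without the cache, the code below
keeps the GEM handle alive and rewraps it with new geometry and tiling.
A sketch of that step, under the same assumptions as above
(rewrap_with_tiling() is a hypothetical helper; the parameters mirror
the intel_buf_init_using_handle() call in the diff):

/* Rewrap an existing GEM handle as an intel_buf with a new logical
 * description; this replaces the old libdrm bo-cache "reuse trick". */
static void rewrap_with_tiling(struct buf_ops *bops, struct intel_buf *buf,
			       int width, int height, uint32_t tiling)
{
	uint32_t handle = buf->handle;

	/* same GEM object, new geometry and tiling */
	intel_buf_init_using_handle(bops, handle, buf,
				    width, height, 32 /* bpp */,
				    4096 /* alignment */,
				    tiling, I915_COMPRESSION_NONE);

	/* buf takes ownership, so intel_buf_destroy() closes the handle */
	intel_buf_set_ownership(buf, true);
}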

diff --git a/tests/i915/gem_set_tiling_vs_blt.c b/tests/i915/gem_set_tiling_vs_blt.c
index cb164154..b012d4c5 100644
--- a/tests/i915/gem_set_tiling_vs_blt.c
+++ b/tests/i915/gem_set_tiling_vs_blt.c
@@ -57,15 +57,10 @@
 #include "drm.h"
 #include "i915/gem.h"
 #include "igt.h"
-#include "intel_bufmgr.h"
 
 IGT_TEST_DESCRIPTION("Check for proper synchronization of tiling changes vs."
 		     " tiled gpu access.");
 
-static drm_intel_bufmgr *bufmgr;
-struct intel_batchbuffer *batch;
-uint32_t devid;
-
 #define TEST_SIZE (1024*1024)
 #define TEST_STRIDE (4*1024)
 #define TEST_HEIGHT(stride)	(TEST_SIZE/(stride))
@@ -73,159 +68,180 @@ uint32_t devid;
 
 uint32_t data[TEST_SIZE/4];
 
-static void do_test(uint32_t tiling, unsigned stride,
+static void __set_tiling(struct buf_ops *bops, struct intel_buf *buf,
+			 uint32_t tiling, unsigned stride)
+{
+	int ret;
+
+	ret = __gem_set_tiling(buf_ops_get_fd(bops), buf->handle, tiling, stride);
+
+	buf->surface[0].stride = stride;
+	buf->surface[0].size = buf->surface[0].stride * TEST_HEIGHT(stride);
+	buf->tiling = tiling;
+
+	igt_assert_eq(ret, 0);
+}
+
+static void do_test(struct buf_ops *bops, uint32_t tiling, unsigned stride,
 		    uint32_t tiling_after, unsigned stride_after)
 {
-	drm_intel_bo *busy_bo, *test_bo, *target_bo;
-	int i, ret;
+	struct intel_buf *busy_buf, *test_buf, *target_buf;
+	struct intel_bb *ibb;
+	int i, fd = buf_ops_get_fd(bops);
 	uint32_t *ptr;
-	uint32_t test_bo_handle;
+	uint32_t test_buf_handle;
 	uint32_t blt_stride, blt_bits;
 	bool tiling_changed = false;
 
 	igt_info("filling ring .. ");
-	busy_bo = drm_intel_bo_alloc(bufmgr, "busy bo bo", 16*1024*1024, 4096);
+	busy_buf = intel_buf_create(bops, 1024, 1024, 16, 4096,
+				    I915_TILING_NONE, I915_COMPRESSION_NONE);
+	ibb = intel_bb_create(fd, 4 * 4096);
 
+	intel_bb_set_fencing(ibb, false);
+	intel_bb_add_intel_buf(ibb, busy_buf, true);
 	for (i = 0; i < 250; i++) {
-		BLIT_COPY_BATCH_START(0);
-		OUT_BATCH((3 << 24) | /* 32 bits */
+		intel_bb_blit_start(ibb, 0);
+		intel_bb_out(ibb, (3 << 24) | /* 32 bits */
 			  (0xcc << 16) | /* copy ROP */
 			  2*1024*4);
-		OUT_BATCH(0 << 16 | 1024);
-		OUT_BATCH((2048) << 16 | (2048));
-		OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
-		OUT_BATCH(0 << 16 | 0);
-		OUT_BATCH(2*1024*4);
-		OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
-		ADVANCE_BATCH();
-
-		if (batch->gen >= 6) {
-			BEGIN_BATCH(3, 0);
-			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
-			OUT_BATCH(0);
-			OUT_BATCH(0);
-			ADVANCE_BATCH();
+		intel_bb_out(ibb, 0 << 16 | 1024);
+		intel_bb_out(ibb, (2048) << 16 | (2048));
+		intel_bb_emit_reloc_fenced(ibb, busy_buf->handle,
+					   I915_GEM_DOMAIN_RENDER,
+					   I915_GEM_DOMAIN_RENDER,
+					   0, busy_buf->addr.offset);
+		intel_bb_out(ibb, 0 << 16 | 0);
+		intel_bb_out(ibb, 2*1024*4);
+		intel_bb_emit_reloc_fenced(ibb, busy_buf->handle,
+					   I915_GEM_DOMAIN_RENDER,
+					   0, 0, busy_buf->addr.offset);
+
+		if (ibb->gen >= 6) {
+			intel_bb_out(ibb, XY_SETUP_CLIP_BLT_CMD);
+			intel_bb_out(ibb, 0);
+			intel_bb_out(ibb, 0);
 		}
 	}
-	intel_batchbuffer_flush(batch);
-
+	intel_bb_flush_blit(ibb);
+	intel_bb_set_fencing(ibb, true);
 	igt_info("playing tricks .. ");
 	/* first allocate the target so it gets out of the way of playing funky
 	 * tricks */
-	target_bo = drm_intel_bo_alloc(bufmgr, "target bo", TEST_SIZE, 4096);
+	target_buf = intel_buf_create(bops, TEST_WIDTH(TEST_STRIDE),
+				      TEST_HEIGHT(TEST_STRIDE), 32, 4096,
+				      I915_TILING_NONE, I915_COMPRESSION_NONE);
 
+	intel_bb_add_intel_buf(ibb, target_buf, true);
 	/* allocate buffer with parameters _after_ transition we want to check
 	 * and touch it, so that it's properly aligned in the gtt. */
-	test_bo = drm_intel_bo_alloc(bufmgr, "tiled busy bo", TEST_SIZE, 4096);
-	test_bo_handle = test_bo->handle;
-	ret = drm_intel_bo_set_tiling(test_bo, &tiling_after, stride_after);
-	igt_assert_eq(ret, 0);
-	drm_intel_gem_bo_map_gtt(test_bo);
-	ptr = test_bo->virtual;
-	*ptr = 0;
-	ptr = NULL;
-	drm_intel_gem_bo_unmap_gtt(test_bo);
+	test_buf = intel_buf_create(bops, TEST_WIDTH(stride_after),
+				   TEST_HEIGHT(stride_after), 32, 4096,
+				   tiling_after, I915_COMPRESSION_NONE);
 
-	drm_intel_bo_unreference(test_bo);
+	intel_bb_add_intel_buf(ibb, test_buf, true);
 
-	test_bo = NULL;
+	test_buf_handle = test_buf->handle;
 
-	/* note we need a bo bigger than batches, otherwise the buffer reuse
-	 * trick will fail. */
-	test_bo = drm_intel_bo_alloc(bufmgr, "busy bo", TEST_SIZE, 4096);
-	/* double check that the reuse trick worked */
-	igt_assert(test_bo_handle == test_bo->handle);
+	ptr = gem_mmap__gtt(fd, test_buf->handle, TEST_SIZE,
+			    PROT_READ | PROT_WRITE);
+	*ptr = 0;
+	gem_munmap(ptr, TEST_SIZE);
 
-	test_bo_handle = test_bo->handle;
-	/* ensure we have the right tiling before we start. */
-	ret = drm_intel_bo_set_tiling(test_bo, &tiling, stride);
-	igt_assert_eq(ret, 0);
+	/* Reuse the object previously aligned in the gtt */
+	intel_buf_init_using_handle(bops, test_buf_handle, test_buf,
+				    TEST_WIDTH(stride), TEST_HEIGHT(stride), 32,
+				    4096, tiling, I915_COMPRESSION_NONE);
+	intel_buf_set_ownership(test_buf, true);
 
 	if (tiling == I915_TILING_NONE) {
-		drm_intel_bo_subdata(test_bo, 0, TEST_SIZE, data);
+		gem_write(fd, test_buf->handle, 0, data, TEST_SIZE);
 	} else {
-		drm_intel_gem_bo_map_gtt(test_bo);
-		ptr = test_bo->virtual;
+		ptr = gem_mmap__gtt(fd, test_buf->handle, TEST_SIZE,
+				    PROT_READ | PROT_WRITE);
+
 		memcpy(ptr, data, TEST_SIZE);
-		ptr = NULL;
-		drm_intel_gem_bo_unmap_gtt(test_bo);
+		gem_munmap(ptr, TEST_SIZE);
+		ptr = NULL;
 	}
 
 	blt_stride = stride;
 	blt_bits = 0;
-	if (intel_gen(devid) >= 4 && tiling != I915_TILING_NONE) {
+	if (intel_gen(ibb->devid) >= 4 && tiling != I915_TILING_NONE) {
 		blt_stride /= 4;
 		blt_bits = XY_SRC_COPY_BLT_SRC_TILED;
 	}
 
-	BLIT_COPY_BATCH_START(blt_bits);
-	OUT_BATCH((3 << 24) | /* 32 bits */
+	intel_bb_blit_start(ibb, blt_bits);
+	intel_bb_out(ibb, (3 << 24) | /* 32 bits */
 		  (0xcc << 16) | /* copy ROP */
 		  stride);
-	OUT_BATCH(0 << 16 | 0);
-	OUT_BATCH((TEST_HEIGHT(stride)) << 16 | (TEST_WIDTH(stride)));
-	OUT_RELOC_FENCED(target_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
-	OUT_BATCH(0 << 16 | 0);
-	OUT_BATCH(blt_stride);
-	OUT_RELOC_FENCED(test_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
-	ADVANCE_BATCH();
-	intel_batchbuffer_flush(batch);
-
-	drm_intel_bo_unreference(test_bo);
-
-	test_bo = drm_intel_bo_alloc_for_render(bufmgr, "tiled busy bo", TEST_SIZE, 4096);
-	/* double check that the reuse trick worked */
-	igt_assert(test_bo_handle == test_bo->handle);
-	ret = drm_intel_bo_set_tiling(test_bo, &tiling_after, stride_after);
-	igt_assert_eq(ret, 0);
+	intel_bb_out(ibb, 0 << 16 | 0);
+	intel_bb_out(ibb, (TEST_HEIGHT(stride)) << 16 | (TEST_WIDTH(stride)));
+	intel_bb_emit_reloc_fenced(ibb, target_buf->handle,
+				   I915_GEM_DOMAIN_RENDER,
+				   I915_GEM_DOMAIN_RENDER,
+				   0, target_buf->addr.offset);
+	intel_bb_out(ibb, 0 << 16 | 0);
+	intel_bb_out(ibb, blt_stride);
+	intel_bb_emit_reloc_fenced(ibb, test_buf->handle,
+				   I915_GEM_DOMAIN_RENDER,
+				   0, 0, test_buf->addr.offset);
+	intel_bb_flush_blit(ibb);
+
+	__set_tiling(bops, test_buf, tiling_after, stride_after);
 
 	/* Note: We don't care about gen4+ here because the blitter doesn't use
 	 * fences there. So not setting tiling flags on the tiled buffer is ok.
 	 */
-	BLIT_COPY_BATCH_START(0);
-	OUT_BATCH((3 << 24) | /* 32 bits */
+
+	intel_bb_blit_start(ibb, 0);
+	intel_bb_out(ibb, (3 << 24) | /* 32 bits */
 		  (0xcc << 16) | /* copy ROP */
 		  stride_after);
-	OUT_BATCH(0 << 16 | 0);
-	OUT_BATCH((1) << 16 | (1));
-	OUT_RELOC_FENCED(test_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
-	OUT_BATCH(0 << 16 | 0);
-	OUT_BATCH(stride_after);
-	OUT_RELOC_FENCED(test_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
-	ADVANCE_BATCH();
-	intel_batchbuffer_flush(batch);
-
+	intel_bb_out(ibb, 0 << 16 | 0);
+	intel_bb_out(ibb, (1) << 16 | (1));
+	intel_bb_emit_reloc_fenced(ibb, test_buf->handle,
+				   I915_GEM_DOMAIN_RENDER,
+				   I915_GEM_DOMAIN_RENDER,
+				   0, test_buf->addr.offset);
+	intel_bb_out(ibb, 0 << 16 | 0);
+	intel_bb_out(ibb, stride_after);
+	intel_bb_emit_reloc_fenced(ibb, test_buf->handle,
+				   I915_GEM_DOMAIN_RENDER,
+				   0, 0, test_buf->addr.offset);
+	intel_bb_flush_blit(ibb);
-	/* Now try to trick the kernel the kernel into changing up the fencing
-	 * too early. */
+	/* Now try to trick the kernel into changing up the fencing too
+	 * early. */
-
 	igt_info("checking .. ");
 	memset(data, 0, TEST_SIZE);
-	drm_intel_bo_get_subdata(target_bo, 0, TEST_SIZE, data);
+
+	gem_read(fd, target_buf->handle, 0, data, TEST_SIZE);
 	for (i = 0; i < TEST_SIZE/4; i++)
 		igt_assert(data[i] == i);
+	/* check whether tiling on the test_buf actually changed. */
+	ptr = gem_mmap__gtt(fd, test_buf->handle, TEST_SIZE,
+			    PROT_WRITE | PROT_READ);
 
-	/* check whether tiling on the test_bo actually changed. */
-	drm_intel_gem_bo_map_gtt(test_bo);
-	ptr = test_bo->virtual;
 	for (i = 0; i < TEST_SIZE/4; i++)
 		if (ptr[i] != data[i])
 			tiling_changed = true;
-	ptr = NULL;
-	drm_intel_gem_bo_unmap_gtt(test_bo);
+
+	gem_munmap(ptr, TEST_SIZE);
 	igt_assert(tiling_changed);
 
-	drm_intel_bo_unreference(test_bo);
-	drm_intel_bo_unreference(target_bo);
-	drm_intel_bo_unreference(busy_bo);
+	intel_buf_destroy(test_buf);
+	intel_buf_destroy(target_buf);
+	intel_buf_destroy(busy_buf);
+
 	igt_info("done\n");
 }
 
-int fd;
-
 igt_main
 {
-	int i;
+	int fd, i;
 	uint32_t tiling, tiling_after;
+	struct buf_ops *bops;
 
 	igt_fixture {
 		for (i = 0; i < 1024*256; i++)
@@ -235,17 +251,13 @@ igt_main
 		igt_require_gem(fd);
 		gem_require_blitter(fd);
 		igt_require(gem_available_fences(fd) > 0);
-
-		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
-		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
-		devid = intel_get_drm_devid(fd);
-		batch = intel_batchbuffer_alloc(bufmgr, devid);
+		bops = buf_ops_create(fd);
 	}
 
 	igt_subtest("untiled-to-tiled") {
 		tiling = I915_TILING_NONE;
 		tiling_after = I915_TILING_X;
-		do_test(tiling, TEST_STRIDE, tiling_after, TEST_STRIDE);
+		do_test(bops, tiling, TEST_STRIDE, tiling_after, TEST_STRIDE);
 		igt_assert(tiling == I915_TILING_NONE);
 		igt_assert(tiling_after == I915_TILING_X);
 	}
@@ -253,7 +265,7 @@ igt_main
 	igt_subtest("tiled-to-untiled") {
 		tiling = I915_TILING_X;
 		tiling_after = I915_TILING_NONE;
-		do_test(tiling, TEST_STRIDE, tiling_after, TEST_STRIDE);
+		do_test(bops, tiling, TEST_STRIDE, tiling_after, TEST_STRIDE);
 		igt_assert(tiling == I915_TILING_X);
 		igt_assert(tiling_after == I915_TILING_NONE);
 	}
@@ -261,8 +273,13 @@ igt_main
 	igt_subtest("tiled-to-tiled") {
 		tiling = I915_TILING_X;
 		tiling_after = I915_TILING_X;
-		do_test(tiling, TEST_STRIDE/2, tiling_after, TEST_STRIDE);
+		do_test(bops, tiling, TEST_STRIDE/2, tiling_after, TEST_STRIDE);
 		igt_assert(tiling == I915_TILING_X);
 		igt_assert(tiling_after == I915_TILING_X);
 	}
+
+	igt_fixture {
+		buf_ops_destroy(bops);
+		close(fd);
+	}
 }
-- 
2.20.1