[igt-dev] [PATCH i-g-t] tests/gem_render_copy: Add software tiling / detiling support

Zbigniew Kempczyński zbigniew.kempczynski at intel.com
Fri Nov 29 09:36:53 UTC 2019


Older GENs have a mappable GGTT which performs buffer tiling / detiling
automatically. Newer GENs have lost this ability, so software tiling /
detiling is required.

The code was heavily refactored to allow configuring the tiled-buffer
copy operations per GEN generation.

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Imre Deak <imre.deak at intel.com>
Cc: Katarzyna Dec <katarzyna.dec at intel.com>
---
 tests/i915/gem_render_copy.c | 491 ++++++++++++++++++++++++++++-------
 1 file changed, 403 insertions(+), 88 deletions(-)
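
A note on the approach (below the '---', so not part of the commit
message): the software tiling path boils down to computing, on the
CPU, the byte offset at which a pixel lands inside a tiled buffer.
As an illustration only, a minimal sketch of the X-tile mapping
implemented by x_ptr() below could look like this (xtile_offset is a
hypothetical name; it assumes a 512x8-byte X tile and ignores the
address swizzling some older platforms apply):

#include <stddef.h>

/* Byte offset of pixel (x, y) inside an X-tiled surface.
 * stride is the surface pitch in bytes (a multiple of the 512-byte
 * tile width for X-tiling), cpp the bytes per pixel.
 */
static size_t xtile_offset(unsigned int x, unsigned int y,
			   unsigned int stride, unsigned int cpp)
{
	const unsigned int tile_w = 512;	/* bytes per tile row */
	const unsigned int tile_h = 8;		/* rows per tile */
	unsigned int xb = x * cpp;		/* x position in bytes */

	return (size_t)(y / tile_h) * stride * tile_h +	/* tile rows above */
	       (xb / tile_w) * tile_w * tile_h +	/* whole tiles left */
	       (y % tile_h) * tile_w +			/* row in the tile */
	       xb % tile_w;				/* byte in the row */
}

Writing one pixel then becomes

	*(uint32_t *)((char *)map + xtile_offset(x, y, stride, 4)) = value;

which is what the generic __copy_linear_to() loop does through the
per-tiling function pointer returned by __get_tile_fn_ptr().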

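The CPU tiling/detiling helpers can also be sanity-checked without any
GPU involvement by tiling a known pattern into plain malloc'ed memory
and detiling it back. A hypothetical standalone round-trip check (not
part of this patch), built on the xtile_offset() sketch above:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Tile a counter pattern into anonymous memory with the software
 * X-tile mapping, detile it back and verify the round trip is
 * lossless. Assumes xtile_offset() from the sketch above.
 */
static void check_xtile_roundtrip(unsigned int width, unsigned int height)
{
	const unsigned int cpp = 4;
	unsigned int stride = (width * cpp + 511) & ~511u;	/* X pitch */
	size_t size = (size_t)stride * ((height + 7) & ~7u);
	uint32_t *in = malloc((size_t)width * height * cpp);
	uint32_t *out = malloc((size_t)width * height * cpp);
	char *tiled = calloc(1, size);

	for (unsigned int i = 0; i < width * height; i++)
		in[i] = i;

	for (unsigned int y = 0; y < height; y++)
		for (unsigned int x = 0; x < width; x++)
			memcpy(tiled + xtile_offset(x, y, stride, cpp),
			       &in[y * width + x], cpp);

	for (unsigned int y = 0; y < height; y++)
		for (unsigned int x = 0; x < width; x++)
			memcpy(&out[y * width + x],
			       tiled + xtile_offset(x, y, stride, cpp), cpp);

	assert(memcmp(in, out, (size_t)width * height * cpp) == 0);

	free(in);
	free(out);
	free(tiled);
}
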
diff --git a/tests/i915/gem_render_copy.c b/tests/i915/gem_render_copy.c
index 67be079c..cbd616a7 100644
--- a/tests/i915/gem_render_copy.c
+++ b/tests/i915/gem_render_copy.c
@@ -53,16 +53,119 @@ IGT_TEST_DESCRIPTION("Basic test for the render_copy() function.");
 #define WIDTH 512
 #define HEIGHT 512
 
+struct gen_format;
+
 typedef struct {
 	int drm_fd;
 	uint32_t devid;
 	drm_intel_bufmgr *bufmgr;
 	struct intel_batchbuffer *batch;
 	igt_render_copyfunc_t render_copy;
+	struct gen_format *format;
 } data_t;
 static int opt_dump_png = false;
 static int check_all_pixels = false;
 
+typedef void (*fn_copy)(data_t *, struct igt_buf *, uint32_t *);
+static void copy_linear_to_gtt(data_t *, struct igt_buf *, uint32_t *);
+static void copy_linear_to_wc(data_t *, struct igt_buf *, uint32_t *);
+static void copy_linear_to_cpu(data_t *, struct igt_buf *, uint32_t *);
+static void copy_linear_to_x(data_t *, struct igt_buf *, uint32_t *);
+static void copy_linear_to_y(data_t *, struct igt_buf *, uint32_t *);
+static void copy_linear_to_yf(data_t *, struct igt_buf *, uint32_t *);
+static void copy_gtt_to_linear(data_t *, struct igt_buf *, uint32_t *);
+static void copy_wc_to_linear(data_t *, struct igt_buf *, uint32_t *);
+static void copy_cpu_to_linear(data_t *, struct igt_buf *, uint32_t *);
+static void copy_x_to_linear(data_t *, struct igt_buf *, uint32_t *);
+static void copy_y_to_linear(data_t *, struct igt_buf *, uint32_t *);
+static void copy_yf_to_linear(data_t *, struct igt_buf *, uint32_t *);
+
+#define TILE_NONE   (1 << I915_TILING_NONE)
+#define TILE_X      (1 << I915_TILING_X)
+#define TILE_Y      (1 << I915_TILING_Y)
+#define TILE_Yf     (1 << I915_TILING_Yf)
+#define TILE_Ys     (1 << I915_TILING_Ys)
+
+struct gen_format {
+	int gen_start;
+	int gen_end;
+	uint32_t supported_tiles;
+	uint32_t hw_tiles;
+	fn_copy linear_to;
+	fn_copy linear_to_x;
+	fn_copy linear_to_y;
+	fn_copy linear_to_yf;
+	fn_copy linear_to_ys;
+	fn_copy to_linear;
+	fn_copy x_to_linear;
+	fn_copy y_to_linear;
+	fn_copy yf_to_linear;
+	fn_copy ys_to_linear;
+};
+
+struct gen_format gen_formats[] = {
+	/* Generations 0 - 8 */
+	{ .gen_start         = 0,
+	  .gen_end           = 8,
+	  .supported_tiles   = TILE_NONE | TILE_X | TILE_Y,
+	  .hw_tiles          = TILE_X | TILE_Y,
+	  .linear_to         = copy_linear_to_cpu,
+	  .linear_to_x       = copy_linear_to_gtt,
+	  .linear_to_y       = copy_linear_to_gtt,
+	  .to_linear         = copy_cpu_to_linear,
+	  .x_to_linear       = copy_gtt_to_linear,
+	  .y_to_linear       = copy_gtt_to_linear,
+	},
+	/* Generations 9 - 11 */
+	{ .gen_start         = 9,
+	  .gen_end           = 11,
+	  .supported_tiles   = TILE_NONE | TILE_X | TILE_Y | TILE_Yf,
+	  .hw_tiles          = TILE_X | TILE_Y,
+	  .linear_to         = copy_linear_to_cpu,
+	  .linear_to_x       = copy_linear_to_gtt,
+	  .linear_to_y       = copy_linear_to_gtt,
+	  .linear_to_yf      = copy_linear_to_yf,
+	  .to_linear         = copy_cpu_to_linear,
+	  .x_to_linear       = copy_gtt_to_linear,
+	  .y_to_linear       = copy_gtt_to_linear,
+	  .yf_to_linear      = copy_yf_to_linear,
+	},
+	/* Generation 12 */
+	{ .gen_start         = 12,
+	  .gen_end           = 12,
+	  .supported_tiles   = TILE_NONE | TILE_X | TILE_Y | TILE_Yf | TILE_Ys,
+	  .linear_to         = copy_linear_to_wc,
+	  .linear_to_x       = copy_linear_to_x,
+	  .linear_to_y       = copy_linear_to_y,
+	  .linear_to_yf      = copy_linear_to_yf,
+	  .linear_to_ys      = NULL,               /* to be implemented */
+	  .to_linear         = copy_wc_to_linear,
+	  .x_to_linear       = copy_x_to_linear,
+	  .y_to_linear       = copy_y_to_linear,
+	  .yf_to_linear      = copy_yf_to_linear,
+	  .ys_to_linear      = NULL,               /* to be implemented */
+	},
+};
+
+static struct gen_format *get_gen_format(int generation)
+{
+	struct gen_format *fmt = NULL;
+
+	for (int i = 0; i < ARRAY_SIZE(gen_formats); i++) {
+		if (generation >= gen_formats[i].gen_start &&
+		    generation <= gen_formats[i].gen_end) {
+			fmt = &gen_formats[i];
+			igt_debug("generation: %d, supported tiles: 0x%02x\n",
+				  generation, fmt->supported_tiles);
+			break;
+		}
+	}
+
+	igt_assert(fmt);
+
+	return fmt;
+}
+
 static const char *make_filename(const char *filename)
 {
 	static char buf[64];
@@ -72,6 +175,62 @@ static const char *make_filename(const char *filename)
 	return buf;
 }
 
+static void *alloc_aligned(uint64_t size)
+{
+	void *p;
+
+	igt_assert_eq(posix_memalign(&p, 16, size), 0);
+
+	return p;
+}
+
+static void *x_ptr(void *ptr,
+		   unsigned int x, unsigned int y,
+		   unsigned int stride, unsigned int cpp)
+{
+	const int tile_width = 512;
+	const int tile_height = 8;
+	const int tile_size = tile_width * tile_height;
+	int tile_x, tile_y;
+	int offset_x, offset_y, pos;
+
+	x *= cpp;
+	tile_x = x / tile_width;
+	tile_y = y / tile_height;
+	offset_x = (tile_x * tile_size);
+	offset_y = (tile_y * stride * tile_height);
+
+	pos = offset_y + offset_x +
+			(y % tile_height * tile_width) + (x % tile_width);
+
+	return ptr + pos;
+}
+
+static void *y_ptr(void *ptr,
+		   unsigned int x, unsigned int y,
+		   unsigned int stride, unsigned int cpp)
+{
+	const int tile_width = 128;
+	const int tile_height = 32;
+	const int owords = 16;
+	const int tile_size = tile_width * tile_height;
+	int tile_x, tile_y;
+	int offset_x, offset_y, pos;
+	int shift_x, shift_y;
+
+	x *= cpp;
+	tile_x = x / tile_width;
+	tile_y = y / tile_height;
+	offset_x = tile_x * tile_size;
+	offset_y = tile_y * stride * tile_height;
+	shift_x = x % owords + (x % tile_width) / owords * tile_height * owords;
+	shift_y = y % tile_height * owords;
+
+	pos = offset_y + offset_x + shift_x + shift_y;
+
+	return ptr + pos;
+}
+
 static void *yf_ptr(void *ptr,
 		    unsigned int x, unsigned int y,
 		    unsigned int stride, unsigned int cpp)
@@ -102,11 +261,39 @@ static void *yf_ptr(void *ptr,
 		(((y & ~0x1f) >> 5) * row_size);
 }
 
-static void copy_linear_to_yf(data_t *data, struct igt_buf *buf,
-			      const uint32_t *linear)
+
+typedef void *(*fn_ptr)(void *, unsigned int, unsigned int,
+			unsigned int, unsigned int);
+static fn_ptr __get_tile_fn_ptr(int tiling)
+{
+	fn_ptr fn = NULL;
+
+	switch (tiling) {
+	case I915_TILING_X:
+		fn = x_ptr;
+		break;
+	case I915_TILING_Y:
+		fn = y_ptr;
+		break;
+	case I915_TILING_Yf:
+		fn = yf_ptr;
+		break;
+	case I915_TILING_Ys:
+		/* To be implemented */
+		break;
+	}
+
+	igt_assert_f(fn, "Can't find tile function for tiling: %d\n", tiling);
+
+	return fn;
+}
+
+static void __copy_linear_to(data_t *data, struct igt_buf *buf,
+			     const uint32_t *linear, int tiling)
 {
 	int height = igt_buf_height(buf);
 	int width = igt_buf_width(buf);
+	fn_ptr fn = __get_tile_fn_ptr(tiling);
 	void *map;
 
 	gem_set_domain(data->drm_fd, buf->bo->handle,
@@ -116,8 +303,8 @@ static void copy_linear_to_yf(data_t *data, struct igt_buf *buf,
 
 	for (int y = 0; y < height; y++) {
 		for (int x = 0; x < width; x++) {
-			uint32_t *ptr = yf_ptr(map, x, y,
-					       buf->stride, buf->bpp / 8);
+			uint32_t *ptr = fn(map, x, y,
+					   buf->stride, buf->bpp / 8);
 
 			*ptr = linear[y * width + x];
 		}
@@ -126,11 +313,30 @@ static void copy_linear_to_yf(data_t *data, struct igt_buf *buf,
 	munmap(map, buf->bo->size);
 }
 
-static void copy_yf_to_linear(data_t *data, struct igt_buf *buf,
+static void copy_linear_to_x(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	__copy_linear_to(data, buf, linear, I915_TILING_X);
+}
+
+static void copy_linear_to_y(data_t *data, struct igt_buf *buf,
 			      uint32_t *linear)
+{
+	__copy_linear_to(data, buf, linear, I915_TILING_Y);
+}
+
+static void copy_linear_to_yf(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	__copy_linear_to(data, buf, linear, I915_TILING_Yf);
+}
+
+static void __copy_to_linear(data_t *data, struct igt_buf *buf,
+			     uint32_t *linear, int tiling)
 {
 	int height = igt_buf_height(buf);
 	int width = igt_buf_width(buf);
+	fn_ptr fn = __get_tile_fn_ptr(tiling);
 	void *map;
 
 	gem_set_domain(data->drm_fd, buf->bo->handle,
@@ -140,8 +346,8 @@ static void copy_yf_to_linear(data_t *data, struct igt_buf *buf,
 
 	for (int y = 0; y < height; y++) {
 		for (int x = 0; x < width; x++) {
-			uint32_t *ptr = yf_ptr(map, x, y,
-					       buf->stride, buf->bpp / 8);
+			uint32_t *ptr = fn(map, x, y,
+					   buf->stride, buf->bpp / 8);
 
 			linear[y * width + x] = *ptr;
 		}
@@ -150,8 +356,26 @@ static void copy_yf_to_linear(data_t *data, struct igt_buf *buf,
 	munmap(map, buf->bo->size);
 }
 
+static void copy_yf_to_linear(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	__copy_to_linear(data, buf, linear, I915_TILING_Yf);
+}
+
+static void copy_x_to_linear(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	__copy_to_linear(data, buf, linear, I915_TILING_X);
+}
+
+static void copy_y_to_linear(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	__copy_to_linear(data, buf, linear, I915_TILING_Y);
+}
+
 static void copy_linear_to_gtt(data_t *data, struct igt_buf *buf,
-			       const uint32_t *linear)
+			       uint32_t *linear)
 {
 	void *map;
 
@@ -175,6 +399,38 @@ static void copy_gtt_to_linear(data_t *data, struct igt_buf *buf,
 		       I915_GEM_DOMAIN_GTT, 0);
 
 	map = gem_mmap__gtt(data->drm_fd, buf->bo->handle,
+			   buf->bo->size, PROT_READ);
+
+	igt_memcpy_from_wc(linear, map, buf->bo->size);
+
+	munmap(map, buf->bo->size);
+}
+
+static void copy_linear_to_wc(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	void *map;
+
+	gem_set_domain(data->drm_fd, buf->bo->handle,
+		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+
+	map = gem_mmap__wc(data->drm_fd, buf->bo->handle, 0,
+			   buf->bo->size, PROT_READ | PROT_WRITE);
+
+	memcpy(map, linear, buf->bo->size);
+
+	munmap(map, buf->bo->size);
+}
+
+static void copy_wc_to_linear(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	void *map;
+
+	gem_set_domain(data->drm_fd, buf->bo->handle,
+		       I915_GEM_DOMAIN_GTT, 0);
+
+	map = gem_mmap__wc(data->drm_fd, buf->bo->handle, 0,
 			    buf->bo->size, PROT_READ);
 
 	igt_memcpy_from_wc(linear, map, buf->bo->size);
@@ -182,19 +438,90 @@ static void copy_gtt_to_linear(data_t *data, struct igt_buf *buf,
 	munmap(map, buf->bo->size);
 }
 
-static void *linear_copy(data_t *data, struct igt_buf *buf)
+static void copy_linear_to_cpu(data_t *data, struct igt_buf *buf,
+			       uint32_t *linear)
 {
-	void *linear;
+	void *map;
+
+	gem_set_domain(data->drm_fd, buf->bo->handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 
-	/* 16B alignment allows to potentially make use of SSE4 for copying */
-	igt_assert_eq(posix_memalign(&linear, 16, buf->bo->size), 0);
+	map = gem_mmap__cpu(data->drm_fd, buf->bo->handle, 0,
+			    buf->bo->size, PROT_READ | PROT_WRITE);
 
-	if (buf->tiling == I915_TILING_Yf)
-		copy_yf_to_linear(data, buf, linear);
-	else
-		copy_gtt_to_linear(data, buf, linear);
+	memcpy(map, linear, buf->bo->size);
 
-	return linear;
+	munmap(map, buf->bo->size);
+}
+
+static void copy_cpu_to_linear(data_t *data, struct igt_buf *buf,
+			       uint32_t *linear)
+{
+	void *map;
+
+	gem_set_domain(data->drm_fd, buf->bo->handle,
+		       I915_GEM_DOMAIN_CPU, 0);
+
+	map = gem_mmap__cpu(data->drm_fd, buf->bo->handle, 0,
+			    buf->bo->size, PROT_READ);
+
+	memcpy(linear, map, buf->bo->size);
+
+	munmap(map, buf->bo->size);
+}
+
+static void copy_bo_to_linear(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	switch (buf->tiling) {
+	case I915_TILING_NONE:
+		igt_assert(data->format->to_linear);
+		data->format->to_linear(data, buf, linear);
+		break;
+	case I915_TILING_X:
+		igt_assert(data->format->x_to_linear);
+		data->format->x_to_linear(data, buf, linear);
+		break;
+	case I915_TILING_Y:
+		igt_assert(data->format->y_to_linear);
+		data->format->y_to_linear(data, buf, linear);
+		break;
+	case I915_TILING_Yf:
+		igt_assert(data->format->yf_to_linear);
+		data->format->yf_to_linear(data, buf, linear);
+		break;
+	case I915_TILING_Ys:
+		igt_assert(data->format->ys_to_linear);
+		data->format->ys_to_linear(data, buf, linear);
+		break;
+	}
+}
+
+static void copy_linear_to_bo(data_t *data, struct igt_buf *buf,
+			      uint32_t *linear)
+{
+	switch (buf->tiling) {
+	case I915_TILING_NONE:
+		igt_assert(data->format->linear_to);
+		data->format->linear_to(data, buf, linear);
+		break;
+	case I915_TILING_X:
+		igt_assert(data->format->linear_to_x);
+		data->format->linear_to_x(data, buf, linear);
+		break;
+	case I915_TILING_Y:
+		igt_assert(data->format->linear_to_y);
+		data->format->linear_to_y(data, buf, linear);
+		break;
+	case I915_TILING_Yf:
+		igt_assert(data->format->linear_to_yf);
+		data->format->linear_to_yf(data, buf, linear);
+		break;
+	case I915_TILING_Ys:
+		igt_assert(data->format->linear_to_ys);
+		data->format->linear_to_ys(data, buf, linear);
+		break;
+	}
 }
 
 static void scratch_buf_write_to_png(data_t *data, struct igt_buf *buf,
@@ -204,7 +531,8 @@ static void scratch_buf_write_to_png(data_t *data, struct igt_buf *buf,
 	cairo_status_t ret;
 	void *linear;
 
-	linear = linear_copy(data, buf);
+	linear = alloc_aligned(buf->bo->size);
+	copy_bo_to_linear(data, buf, linear);
 
 	surface = cairo_image_surface_create_for_data(linear,
 						      CAIRO_FORMAT_RGB24,
@@ -250,12 +578,12 @@ static void *linear_copy_aux(data_t *data, struct igt_buf *buf)
 	int aux_size = scratch_buf_aux_width(data->devid, buf) *
 		scratch_buf_aux_height(data->devid, buf);
 
-	igt_assert_eq(posix_memalign(&linear, 16, aux_size), 0);
+	linear = alloc_aligned(aux_size);
 
 	gem_set_domain(data->drm_fd, buf->bo->handle,
 		       I915_GEM_DOMAIN_GTT, 0);
 
-	map = gem_mmap__gtt(data->drm_fd, buf->bo->handle,
+	map = gem_mmap__wc(data->drm_fd, buf->bo->handle, 0,
 			    buf->bo->size, PROT_READ);
 
 	igt_memcpy_from_wc(linear, map + buf->aux.offset, aux_size);
@@ -297,7 +625,7 @@ static void scratch_buf_draw_pattern(data_t *data, struct igt_buf *buf,
 	cairo_t *cr;
 	void *linear;
 
-	linear = linear_copy(data, buf);
+	linear = alloc_aligned(buf->bo->size);
 
 	surface = cairo_image_surface_create_for_data(linear,
 						      CAIRO_FORMAT_RGB24,
@@ -338,10 +666,7 @@ static void scratch_buf_draw_pattern(data_t *data, struct igt_buf *buf,
 
 	cairo_surface_destroy(surface);
 
-	if (buf->tiling == I915_TILING_Yf)
-		copy_linear_to_yf(data, buf, linear);
-	else
-		copy_linear_to_gtt(data, buf, linear);
+	copy_linear_to_bo(data, buf, linear);
 
 	free(linear);
 }
@@ -354,6 +679,7 @@ scratch_buf_copy(data_t *data,
 	int width = igt_buf_width(dst);
 	int height  = igt_buf_height(dst);
 	uint32_t *linear_dst;
+	uint32_t *linear_src;
 
 	igt_assert_eq(igt_buf_width(dst), igt_buf_width(src));
 	igt_assert_eq(igt_buf_height(dst), igt_buf_height(src));
@@ -366,49 +692,20 @@ scratch_buf_copy(data_t *data,
 	h = min(h, height - sy);
 	h = min(h, height - dy);
 
-	gem_set_domain(data->drm_fd, dst->bo->handle,
-		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-	linear_dst = gem_mmap__gtt(data->drm_fd, dst->bo->handle,
-				   dst->bo->size, PROT_WRITE);
-
-	if (src->tiling == I915_TILING_Yf) {
-		void *map;
-
-		gem_set_domain(data->drm_fd, src->bo->handle,
-			       I915_GEM_DOMAIN_CPU, 0);
-		map = gem_mmap__cpu(data->drm_fd, src->bo->handle, 0,
-				    src->bo->size, PROT_READ);
-
-		for (int y = 0; y < h; y++) {
-			for (int x = 0; x < w; x++) {
-				const uint32_t *ptr = yf_ptr(map, sx+x, sy+y,
-							     src->stride,
-							     src->bpp / 8);
-
-				linear_dst[(dy+y) * width + dx+x] = *ptr;
-			}
-		}
+	linear_dst = alloc_aligned(dst->bo->size);
+	linear_src = alloc_aligned(src->bo->size);
+	copy_bo_to_linear(data, src, linear_src);
+	copy_bo_to_linear(data, dst, linear_dst);
 
-		munmap(map, src->bo->size);
-	} else {
-		uint32_t *linear_src;
-
-		gem_set_domain(data->drm_fd, src->bo->handle,
-			       I915_GEM_DOMAIN_GTT, 0);
-
-		linear_src = gem_mmap__gtt(data->drm_fd, src->bo->handle,
-					   src->bo->size, PROT_READ);
-
-		for (int y = 0; y < h; y++) {
-			igt_memcpy_from_wc(&linear_dst[(dy+y) * width + dx],
-					   &linear_src[(sy+y) * width + sx],
-					   w * (src->bpp / 8));
-		}
-
-		munmap(linear_src, src->bo->size);
+	for (int y = 0; y < h; y++) {
+		memcpy(&linear_dst[(dy+y) * width + dx],
+				&linear_src[(sy+y) * width + sx],
+				w * (src->bpp / 8));
 	}
+	free(linear_src);
 
-	munmap(linear_dst, dst->bo->size);
+	copy_linear_to_bo(data, dst, linear_dst);
+	free(linear_dst);
 }
 
 static void scratch_buf_init(data_t *data, struct igt_buf *buf,
@@ -424,6 +721,7 @@ static void scratch_buf_init(data_t *data, struct igt_buf *buf,
 	if (ccs) {
 		int aux_width, aux_height;
 		int size;
+		uint32_t tile_mask = (1 << req_tiling);
 
 		igt_require(intel_gen(data->devid) >= 9);
 		igt_assert(tiling == I915_TILING_Y ||
@@ -457,31 +755,40 @@ static void scratch_buf_init(data_t *data, struct igt_buf *buf,
 
 		buf->bo = drm_intel_bo_alloc(data->bufmgr, "", size, 4096);
 
-		if (tiling == I915_TILING_Y) {
+		if (tiling == I915_TILING_Y &&
+				data->format->hw_tiles & tile_mask) {
 			drm_intel_bo_set_tiling(buf->bo, &tiling, buf->stride);
 			igt_assert_eq(tiling, req_tiling);
 		}
-	} else if (req_tiling == I915_TILING_Yf) {
-		int size;
+	} else {
+		uint32_t tile_mask = (1 << req_tiling);
+
+		/* Requested bo must be hw tiled */
+		if (data->format->hw_tiles & tile_mask) {
+			buf->bo = drm_intel_bo_alloc_tiled(data->bufmgr, "",
+							   width, height,
+							   bpp / 8,
+							   &tiling, &pitch, 0);
+			igt_assert_eq(tiling, req_tiling);
 
-		buf->stride = ALIGN(width * (bpp / 8), 128);
-		buf->size = buf->stride * height;
-		buf->tiling = tiling;
-		buf->bpp = bpp;
+			buf->stride = pitch;
+			buf->tiling = tiling;
+			buf->size = pitch * height;
+			buf->bpp = bpp;
+		/* Use bo alloc and software tiling/detiling */
+		} else {
+			int size;
 
-		size = buf->stride * ALIGN(height, 32);
+			buf->stride = ALIGN(width * (bpp / 8), 128);
+			buf->size = buf->stride * height;
+			buf->tiling = tiling;
+			buf->bpp = bpp;
 
-		buf->bo = drm_intel_bo_alloc(data->bufmgr, "", size, 4096);
-	} else {
-		buf->bo = drm_intel_bo_alloc_tiled(data->bufmgr, "",
-						   width, height, bpp / 8,
-						   &tiling, &pitch, 0);
-		igt_assert_eq(tiling, req_tiling);
+			size = buf->stride * ALIGN(height, 32);
 
-		buf->stride = pitch;
-		buf->tiling = tiling;
-		buf->size = pitch * height;
-		buf->bpp = bpp;
+			buf->bo = drm_intel_bo_alloc(data->bufmgr, "", size,
+						     4096);
+		}
 	}
 
 	igt_assert(igt_buf_width(buf) == width);
@@ -507,11 +814,13 @@ scratch_buf_check(data_t *data,
 	igt_assert_eq(igt_buf_height(buf), igt_buf_height(ref));
 	igt_assert_eq(buf->bo->size, ref->bo->size);
 
-	linear = linear_copy(data, buf);
+	linear = alloc_aligned(buf->bo->size);
+	copy_bo_to_linear(data, buf, linear);
 	buf_val = linear[y * width + x];
 	free(linear);
 
-	linear = linear_copy(data, ref);
+	linear = alloc_aligned(ref->bo->size);
+	copy_bo_to_linear(data, ref, linear);
 	ref_val = linear[y * width + x];
 	free(linear);
 
@@ -533,8 +842,10 @@ scratch_buf_check_all(data_t *data,
 	igt_assert_eq(igt_buf_height(buf), igt_buf_height(ref));
 	igt_assert_eq(buf->bo->size, ref->bo->size);
 
-	linear_buf = linear_copy(data, buf);
-	linear_ref = linear_copy(data, ref);
+	linear_buf = alloc_aligned(buf->bo->size);
+	linear_ref = alloc_aligned(ref->bo->size);
+	copy_bo_to_linear(data, buf, linear_buf);
+	copy_bo_to_linear(data, ref, linear_ref);
 
 	for (int y = 0; y < height; y++) {
 		for (int x = 0; x < width; x++) {
@@ -627,6 +938,7 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 
 	for (int i = 0; i < num_src; i++)
 		scratch_buf_init(data, &src[i].buf, WIDTH, HEIGHT, src[i].tiling, false);
+
 	scratch_buf_init(data, &dst, WIDTH, HEIGHT, dst_tiling, false);
 	if (src_compressed)
 		scratch_buf_init(data, &src_ccs, WIDTH, HEIGHT,
@@ -639,7 +951,8 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 	for (int i = 0; i < num_src; i++)
 		scratch_buf_draw_pattern(data, &src[i].buf,
 					 0, 0, WIDTH, HEIGHT,
-					 0, 0, WIDTH, HEIGHT, true);
+					 0, 0, WIDTH, HEIGHT, (i % 2));
+
 	scratch_buf_draw_pattern(data, &dst,
 				 0, 0, WIDTH, HEIGHT,
 				 0, 0, WIDTH, HEIGHT, false);
@@ -826,6 +1139,8 @@ igt_main_args("da", NULL, help_str, opt_handler, NULL)
 		data.batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
 		igt_assert(data.batch);
 
+		data.format = get_gen_format(intel_gen(data.devid));
+
 		igt_fork_hang_detector(data.drm_fd);
 	}
 
-- 
2.23.0


