[Intel-gfx] [PATCH 6/6] lib: switch intel_copy_bo to directly take a size
Daniel Vetter
daniel.vetter at ffwll.ch
Thu Mar 13 19:02:51 CET 2014
Take a plain size in bytes instead of a width/height combination.
Since I've been lazy with the math, this now only accepts page-aligned
copy operations, but that's all we need really.
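
Internally the copy is emitted as a blit of 4096-byte rows (1024
32-bit pixels wide, size/4096 lines tall), which is where the
page-alignment requirement comes from. For callers the conversion is
just the old width/height pair (in 4-byte pixels and lines) multiplied
out into a byte count, as the call-site changes below do:

	/* before: copy area given as 4-byte pixels x lines */
	intel_copy_bo(batch, dst_bo, src_bo, width, height);

	/* after: plain byte count, must be a multiple of 4096 */
	intel_copy_bo(batch, dst_bo, src_bo, width * height * 4);
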
Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
lib/intel_batchbuffer.c | 21 +++++++++------------
lib/intel_batchbuffer.h | 2 +-
tests/gem_concurrent_blit.c | 8 ++++----
tests/gem_pread_after_blit.c | 22 +++++++++++-----------
tests/gem_ring_sync_copy.c | 4 ++--
tests/gem_seqno_wrap.c | 4 ++--
tests/gem_tiled_blits.c | 10 +++++-----
tests/gem_tiled_fence_blits.c | 4 ++--
tests/prime_nv_test.c | 2 +-
9 files changed, 37 insertions(+), 40 deletions(-)
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 931cfca809de..195f1b29925d 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -366,24 +366,21 @@ intel_blt_copy(struct intel_batchbuffer *batch,
* @batch: batchbuffer object
* @src_bo: source libdrm buffer object
* @dst_bo: destination libdrm buffer object
- * @width: width of the copied area in 4-byte pixels
- * @height: height of the copied area in lines
+ * @size: size of the copy range in bytes
*
* This emits a copy operation using blitter commands into the supplied batch
- * buffer object. A total of @width times @height bytes from the start of
- * @src_bo is copied over to @dst_bo.
- *
- * FIXME: We need @width and @height to avoid hitting into platform specific
- * of the blitter. It would be easier to just accept a size and do the math
- * ourselves.
+ * buffer object. A total of @size bytes from the start of @src_bo is copied
+ * over to @dst_bo. Note that @size must be page-aligned.
*/
void
intel_copy_bo(struct intel_batchbuffer *batch,
drm_intel_bo *dst_bo, drm_intel_bo *src_bo,
- int width, int height)
+ long int size)
{
+ igt_assert(size % 4096 == 0);
+
intel_blt_copy(batch,
- src_bo, 0, 0, width * 4,
- dst_bo, 0, 0, width * 4,
- width, height, 32);
+ src_bo, 0, 0, 4096,
+ dst_bo, 0, 0, 4096,
+ 4096/4, size/4096, 32);
}
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 65a21d781000..10088c2ecc3c 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -193,6 +193,6 @@ intel_blt_copy(struct intel_batchbuffer *batch,
int width, int height, int bpp);
void intel_copy_bo(struct intel_batchbuffer *batch,
drm_intel_bo *dst_bo, drm_intel_bo *src_bo,
- int width, int height);
+ long int size);
#endif
diff --git a/tests/gem_concurrent_blit.c b/tests/gem_concurrent_blit.c
index 09616e572fde..254bc4c265a3 100644
--- a/tests/gem_concurrent_blit.c
+++ b/tests/gem_concurrent_blit.c
@@ -204,7 +204,7 @@ static void do_overwrite_source(struct access_mode *mode,
mode->set_bo(dst[i], i, width, height);
}
for (i = 0; i < num_buffers; i++)
- intel_copy_bo(batch, dst[i], src[i], width, height);
+ intel_copy_bo(batch, dst[i], src[i], width*height*4);
for (i = num_buffers; i--; )
mode->set_bo(src[i], 0xdeadbeef, width, height);
for (i = 0; i < num_buffers; i++)
@@ -221,7 +221,7 @@ static void do_early_read(struct access_mode *mode,
for (i = num_buffers; i--; )
mode->set_bo(src[i], 0xdeadbeef, width, height);
for (i = 0; i < num_buffers; i++)
- intel_copy_bo(batch, dst[i], src[i], width, height);
+ intel_copy_bo(batch, dst[i], src[i], width*height*4);
for (i = num_buffers; i--; )
mode->cmp_bo(dst[i], 0xdeadbeef, width, height);
}
@@ -236,9 +236,9 @@ static void do_gpu_read_after_write(struct access_mode *mode,
for (i = num_buffers; i--; )
mode->set_bo(src[i], 0xabcdabcd, width, height);
for (i = 0; i < num_buffers; i++)
- intel_copy_bo(batch, dst[i], src[i], width, height);
+ intel_copy_bo(batch, dst[i], src[i], width*height*4);
for (i = num_buffers; i--; )
- intel_copy_bo(batch, dummy, dst[i], width, height);
+ intel_copy_bo(batch, dummy, dst[i], width*height*4);
for (i = num_buffers; i--; )
mode->cmp_bo(dst[i], 0xabcdabcd, width, height);
}
diff --git a/tests/gem_pread_after_blit.c b/tests/gem_pread_after_blit.c
index 4faf104e2b4e..eb8fd3a4854f 100644
--- a/tests/gem_pread_after_blit.c
+++ b/tests/gem_pread_after_blit.c
@@ -138,31 +138,31 @@ static void do_test(int fd, int cache_level,
do {
/* First, do a full-buffer read after blitting */
- intel_copy_bo(batch, tmp[0], src[0], width, height);
+ intel_copy_bo(batch, tmp[0], src[0], width*height*4);
verify_large_read(tmp[0], start[0]);
- intel_copy_bo(batch, tmp[0], src[1], width, height);
+ intel_copy_bo(batch, tmp[0], src[1], width*height*4);
verify_large_read(tmp[0], start[1]);
- intel_copy_bo(batch, tmp[0], src[0], width, height);
+ intel_copy_bo(batch, tmp[0], src[0], width*height*4);
verify_small_read(tmp[0], start[0]);
- intel_copy_bo(batch, tmp[0], src[1], width, height);
+ intel_copy_bo(batch, tmp[0], src[1], width*height*4);
verify_small_read(tmp[0], start[1]);
- intel_copy_bo(batch, tmp[0], src[0], width, height);
+ intel_copy_bo(batch, tmp[0], src[0], width*height*4);
verify_large_read(tmp[0], start[0]);
- intel_copy_bo(batch, tmp[0], src[0], width, height);
- intel_copy_bo(batch, tmp[1], src[1], width, height);
+ intel_copy_bo(batch, tmp[0], src[0], width*height*4);
+ intel_copy_bo(batch, tmp[1], src[1], width*height*4);
verify_large_read(tmp[0], start[0]);
verify_large_read(tmp[1], start[1]);
- intel_copy_bo(batch, tmp[0], src[0], width, height);
- intel_copy_bo(batch, tmp[1], src[1], width, height);
+ intel_copy_bo(batch, tmp[0], src[0], width*height*4);
+ intel_copy_bo(batch, tmp[1], src[1], width*height*4);
verify_large_read(tmp[1], start[1]);
verify_large_read(tmp[0], start[0]);
- intel_copy_bo(batch, tmp[1], src[0], width, height);
- intel_copy_bo(batch, tmp[0], src[1], width, height);
+ intel_copy_bo(batch, tmp[1], src[0], width*height*4);
+ intel_copy_bo(batch, tmp[0], src[1], width*height*4);
verify_large_read(tmp[0], start[1]);
verify_large_read(tmp[1], start[0]);
} while (--loop);
diff --git a/tests/gem_ring_sync_copy.c b/tests/gem_ring_sync_copy.c
index 8ea3329a8a9c..2cd78a32d8c2 100644
--- a/tests/gem_ring_sync_copy.c
+++ b/tests/gem_ring_sync_copy.c
@@ -241,7 +241,7 @@ static void blitter_busy(data_t *data)
intel_copy_bo(data->batch,
data->blitter.srcs[i],
data->blitter.dsts[i],
- WIDTH, HEIGHT);
+ WIDTH*HEIGHT*4);
}
}
@@ -262,7 +262,7 @@ static void blitter_busy_fini(data_t *data)
static void blitter_copy(data_t *data, drm_intel_bo *src, drm_intel_bo *dst)
{
- intel_copy_bo(data->batch, dst, src, WIDTH, HEIGHT);
+ intel_copy_bo(data->batch, dst, src, WIDTH*HEIGHT*4);
}
struct ring_ops {
diff --git a/tests/gem_seqno_wrap.c b/tests/gem_seqno_wrap.c
index 0d90df6b7db0..79562693d250 100644
--- a/tests/gem_seqno_wrap.c
+++ b/tests/gem_seqno_wrap.c
@@ -160,7 +160,7 @@ static void render_copyfunc(struct scratch_buf *src,
}
igt_assert(dst->bo);
igt_assert(src->bo);
- intel_copy_bo(batch_blt, dst->bo, src->bo, width, height);
+ intel_copy_bo(batch_blt, dst->bo, src->bo, width*height*4);
intel_batchbuffer_flush(batch_blt);
}
}
@@ -252,7 +252,7 @@ static int run_sync_test(int num_buffers, bool verify)
if (verify) {
for (i = 0; i < num_buffers; i++)
intel_copy_bo(batch_blt, dst2[p_dst2[i]], dst1[p_dst1[i]],
- width, height);
+ width*height*4);
for (i = 0; i < num_buffers; i++) {
r = cmp_bo(dst2[p_dst2[i]], i, width, height);
diff --git a/tests/gem_tiled_blits.c b/tests/gem_tiled_blits.c
index 2b8b74d462df..e21fc7894c0f 100644
--- a/tests/gem_tiled_blits.c
+++ b/tests/gem_tiled_blits.c
@@ -81,7 +81,7 @@ create_bo(uint32_t start_val)
linear[i] = start_val++;
drm_intel_bo_unmap(linear_bo);
- intel_copy_bo (batch, bo, linear_bo, width, height);
+ intel_copy_bo (batch, bo, linear_bo, width*height*4);
drm_intel_bo_unreference(linear_bo);
@@ -97,7 +97,7 @@ check_bo(drm_intel_bo *bo, uint32_t start_val)
linear_bo = drm_intel_bo_alloc(bufmgr, "linear dst", 1024 * 1024, 4096);
- intel_copy_bo(batch, linear_bo, bo, width, height);
+ intel_copy_bo(batch, linear_bo, bo, width*height*4);
do_or_die(drm_intel_bo_map(linear_bo, 0));
linear = linear_bo->virtual;
@@ -143,7 +143,7 @@ static void run_test(int count)
if (src == dst)
continue;
- intel_copy_bo(batch, bo[dst], bo[src], width, height);
+ intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
bo_start_val[dst] = bo_start_val[src];
}
for (i = 0; i < count; i++)
@@ -165,7 +165,7 @@ static void run_test(int count)
if (src == dst)
continue;
- intel_copy_bo(batch, bo[dst], bo[src], width, height);
+ intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
bo_start_val[dst] = bo_start_val[src];
}
for (i = 0; i < count; i++)
@@ -179,7 +179,7 @@ static void run_test(int count)
if (src == dst)
continue;
- intel_copy_bo(batch, bo[dst], bo[src], width, height);
+ intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
bo_start_val[dst] = bo_start_val[src];
}
for (i = 0; i < count; i++) {
diff --git a/tests/gem_tiled_fence_blits.c b/tests/gem_tiled_fence_blits.c
index c72d25f91bbb..b33be58c3e1b 100644
--- a/tests/gem_tiled_fence_blits.c
+++ b/tests/gem_tiled_fence_blits.c
@@ -136,7 +136,7 @@ igt_simple_main
for (i = 0; i < count; i++) {
int src = count - i - 1;
- intel_copy_bo(batch, bo[i], bo[src], width, height);
+ intel_copy_bo(batch, bo[i], bo[src], width*height*4);
bo_start_val[i] = bo_start_val[src];
}
@@ -147,7 +147,7 @@ igt_simple_main
if (src == dst)
continue;
- intel_copy_bo(batch, bo[dst], bo[src], width, height);
+ intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
bo_start_val[dst] = bo_start_val[src];
/*
diff --git a/tests/prime_nv_test.c b/tests/prime_nv_test.c
index 8a6354020b7c..640515b4bebb 100644
--- a/tests/prime_nv_test.c
+++ b/tests/prime_nv_test.c
@@ -336,7 +336,7 @@ static void test_i915_blt_fill_nv_read(void)
igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
close(prime_fd);
- intel_copy_bo(intel_batch, test_intel_bo, src_bo, 256, 1024/4);
+ intel_copy_bo(intel_batch, test_intel_bo, src_bo, BO_SIZE);
igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
--
1.8.5.2