[igt-dev] [PATCH i-g-t v3 16/22] tests/prime_mmap_coherency.c: Remove libdrm dependency
From: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Date: Fri Sep 18 10:58:50 UTC 2020
Use intel_bb / intel_buf instead of libdrm's intel_batchbuffer / drm_intel_bo to remove the libdrm dependency.
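The old intel_copy_bo(batch, dst, src, size) call copied a plain byte count, while intel_bb_blt_copy() takes explicit blit geometry (source/destination buffer, x/y offset, pitch in bytes, width in pixels, height in rows, bpp). As a rough illustration of how the call sites below map the old byte count onto that geometry, here is a minimal sketch; the copy_size_bytes() helper name is made up for this mail and is not part of the patch:

    /* Hypothetical wrapper, mirroring intel_copy_bo(batch, dst, src, size):
     * copy 'size' bytes from src to dst with the blitter, assuming a
     * 4096-byte pitch and 'size' being a multiple of 4096. */
    static void copy_size_bytes(struct intel_bb *ibb, struct intel_buf *dst,
                                struct intel_buf *src, unsigned int size)
    {
            intel_bb_blt_copy(ibb,
                              src, 0, 0, 4096,  /* src buf, x, y, pitch (bytes) */
                              dst, 0, 0, 4096,  /* dst buf, x, y, pitch (bytes) */
                              4096 / 4,         /* width in pixels at 32 bpp */
                              size / 4096,      /* height in rows */
                              32);              /* bits per pixel */
    }

With size = width * height this moves the same number of bytes the old intel_copy_bo() calls did, so the behaviour of the tests is unchanged.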
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
tests/prime_mmap_coherency.c | 110 +++++++++++++++++++----------------
1 file changed, 60 insertions(+), 50 deletions(-)
diff --git a/tests/prime_mmap_coherency.c b/tests/prime_mmap_coherency.c
index 2a074957..0c007f43 100644
--- a/tests/prime_mmap_coherency.c
+++ b/tests/prime_mmap_coherency.c
@@ -36,8 +36,8 @@ IGT_TEST_DESCRIPTION("Test dma-buf mmap on !llc platforms mostly and provoke"
" coherency bugs so we know for sure where we need the sync ioctls.");
int fd;
-static drm_intel_bufmgr *bufmgr;
-struct intel_batchbuffer *batch;
+static struct buf_ops *bops;
+static struct intel_bb *ibb;
static int width = 1024, height = 1024;
/*
@@ -49,29 +49,33 @@ static int width = 1024, height = 1024;
*/
static int test_read_flush(void)
{
- drm_intel_bo *bo_1;
- drm_intel_bo *bo_2;
+ struct intel_buf *buf_1;
+ struct intel_buf *buf_2;
uint32_t *ptr_cpu;
uint32_t *ptr_gtt;
int dma_buf_fd, i;
int stale = 0;
- bo_1 = drm_intel_bo_alloc(bufmgr, "BO 1", width * height * 4, 4096);
-
+ buf_1 = intel_buf_create(bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
/* STEP #1: put the BO 1 in GTT domain. We use the blitter to copy and fill
* zeros to BO 1, so commands will be submitted and likely to place BO 1 in
* the GTT domain. */
- bo_2 = drm_intel_bo_alloc(bufmgr, "BO 2", width * height * 4, 4096);
- intel_copy_bo(batch, bo_1, bo_2, width * height);
- drm_intel_bo_unreference(bo_2);
+ buf_2 = intel_buf_create(bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
+ intel_bb_blt_copy(ibb, buf_2, 0, 0, 4096, buf_1, 0, 0, 4096,
+ 4096/4, (width * height)/4096, 32);
+ intel_buf_destroy(buf_2);
/* STEP #2: read BO 1 using the dma-buf CPU mmap. This dirties the CPU caches. */
- dma_buf_fd = prime_handle_to_fd_for_mmap(fd, bo_1->handle);
+ dma_buf_fd = prime_handle_to_fd_for_mmap(fd, buf_1->handle);
/* STEP #3: write 0x11 into BO 1. */
- bo_2 = drm_intel_bo_alloc(bufmgr, "BO 2", width * height * 4, 4096);
- ptr_gtt = gem_mmap__device_coherent(fd, bo_2->handle, 0, width * height, PROT_READ | PROT_WRITE);
- gem_set_domain(fd, bo_2->handle,
+ buf_2 = intel_buf_create(bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
+ ptr_gtt = gem_mmap__device_coherent(fd, buf_2->handle, 0, width * height,
+ PROT_READ | PROT_WRITE);
+ gem_set_domain(fd, buf_2->handle,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
memset(ptr_gtt, 0xc5, width * height);
munmap(ptr_gtt, width * height);
@@ -85,8 +89,9 @@ static int test_read_flush(void)
igt_assert_eq(ptr_cpu[i], 0);
prime_sync_end(dma_buf_fd, false);
- intel_copy_bo(batch, bo_1, bo_2, width * height);
- drm_intel_bo_unreference(bo_2);
+ intel_bb_blt_copy(ibb, buf_2, 0, 0, 4096, buf_1, 0, 0, 4096,
+ 4096/4, (width * height)/4096, 32);
+ intel_buf_destroy(buf_2);
/* STEP #4: read again using the CPU mmap. Doing #1 before #3 makes sure we
* don't do a full CPU cache flush in step #3 again. That makes sure all the
@@ -99,7 +104,7 @@ static int test_read_flush(void)
stale++;
prime_sync_end(dma_buf_fd, false);
- drm_intel_bo_unreference(bo_1);
+ intel_buf_destroy(buf_1);
munmap(ptr_cpu, width * height);
close(dma_buf_fd);
@@ -116,24 +121,27 @@ static int test_read_flush(void)
*/
static int test_write_flush(void)
{
- drm_intel_bo *bo_1;
- drm_intel_bo *bo_2;
+ struct intel_buf *buf_1;
+ struct intel_buf *buf_2;
uint32_t *ptr_cpu;
uint32_t *ptr2_cpu;
int dma_buf_fd, dma_buf2_fd, i;
int stale = 0;
- bo_1 = drm_intel_bo_alloc(bufmgr, "BO 1", width * height * 4, 4096);
+ buf_1 = intel_buf_create(bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
/* STEP #1: Put the BO 1 in GTT domain. We use the blitter to copy and fill
* zeros to BO 1, so commands will be submitted and likely to place BO 1 in
* the GTT domain. */
- bo_2 = drm_intel_bo_alloc(bufmgr, "BO 2", width * height * 4, 4096);
- intel_copy_bo(batch, bo_1, bo_2, width * height);
- drm_intel_bo_unreference(bo_2);
+ buf_2 = intel_buf_create(bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
+ intel_bb_blt_copy(ibb, buf_2, 0, 0, 4096, buf_1, 0, 0, 4096,
+ 4096/4, (width * height)/4096, 32);
+ intel_buf_destroy(buf_2);
/* STEP #2: Write '1's into BO 1 using the dma-buf CPU mmap. */
- dma_buf_fd = prime_handle_to_fd_for_mmap(fd, bo_1->handle);
+ dma_buf_fd = prime_handle_to_fd_for_mmap(fd, buf_1->handle);
igt_skip_on(errno == EINVAL);
ptr_cpu = mmap(NULL, width * height, PROT_READ | PROT_WRITE,
@@ -147,13 +155,15 @@ static int test_write_flush(void)
prime_sync_end(dma_buf_fd, true);
/* STEP #3: Copy BO 1 into BO 2, using blitter. */
- bo_2 = drm_intel_bo_alloc(bufmgr, "BO 2", width * height * 4, 4096);
- intel_copy_bo(batch, bo_2, bo_1, width * height);
+ buf_2 = intel_buf_create(bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
+ intel_bb_blt_copy(ibb, buf_1, 0, 0, 4096, buf_2, 0, 0, 4096,
+ 4096/4, (width * height)/4096, 32);
/* STEP #4: compare BO 2 against written BO 1. In !llc hardware, there
* should be some cache lines that didn't get flushed out and are still 0,
* requiring cache flush before the write in step 2. */
- dma_buf2_fd = prime_handle_to_fd_for_mmap(fd, bo_2->handle);
+ dma_buf2_fd = prime_handle_to_fd_for_mmap(fd, buf_2->handle);
igt_skip_on(errno == EINVAL);
ptr2_cpu = mmap(NULL, width * height, PROT_READ | PROT_WRITE,
@@ -168,8 +178,8 @@ static int test_write_flush(void)
prime_sync_end(dma_buf2_fd, false);
- drm_intel_bo_unreference(bo_1);
- drm_intel_bo_unreference(bo_2);
+ intel_buf_destroy(buf_1);
+ intel_buf_destroy(buf_2);
munmap(ptr_cpu, width * height);
close(dma_buf2_fd);
@@ -180,33 +190,32 @@ static int test_write_flush(void)
static void blit_and_cmp(void)
{
- drm_intel_bo *bo_1;
- drm_intel_bo *bo_2;
+ struct intel_buf *buf_1;
+ struct intel_buf *buf_2;
uint32_t *ptr_cpu;
uint32_t *ptr2_cpu;
int dma_buf_fd, dma_buf2_fd, i;
int local_fd;
- drm_intel_bufmgr *local_bufmgr;
- struct intel_batchbuffer *local_batch;
-
+ struct buf_ops *local_bops;
+ struct intel_bb *local_ibb;
/* recreate process local variables */
local_fd = drm_open_driver(DRIVER_INTEL);
- local_bufmgr = drm_intel_bufmgr_gem_init(local_fd, 4096);
- igt_assert(local_bufmgr);
+ local_bops = buf_ops_create(local_fd);
- local_batch = intel_batchbuffer_alloc(local_bufmgr, intel_get_drm_devid(local_fd));
- igt_assert(local_batch);
+ local_ibb = intel_bb_create(local_fd, 4096);
- bo_1 = drm_intel_bo_alloc(local_bufmgr, "BO 1", width * height * 4, 4096);
- dma_buf_fd = prime_handle_to_fd_for_mmap(local_fd, bo_1->handle);
+ buf_1 = intel_buf_create(local_bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
+ dma_buf_fd = prime_handle_to_fd_for_mmap(local_fd, buf_1->handle);
igt_skip_on(errno == EINVAL);
ptr_cpu = mmap(NULL, width * height, PROT_READ | PROT_WRITE,
MAP_SHARED, dma_buf_fd, 0);
igt_assert(ptr_cpu != MAP_FAILED);
- bo_2 = drm_intel_bo_alloc(local_bufmgr, "BO 2", width * height * 4, 4096);
- dma_buf2_fd = prime_handle_to_fd_for_mmap(local_fd, bo_2->handle);
+ buf_2 = intel_buf_create(local_bops, width, height, 32, 4096,
+ I915_TILING_NONE, I915_COMPRESSION_NONE);
+ dma_buf2_fd = prime_handle_to_fd_for_mmap(local_fd, buf_2->handle);
ptr2_cpu = mmap(NULL, width * height, PROT_READ | PROT_WRITE,
MAP_SHARED, dma_buf2_fd, 0);
@@ -222,7 +231,8 @@ static void blit_and_cmp(void)
prime_sync_end(dma_buf2_fd, true);
/* Copy BO 1 into BO 2, using blitter. */
- intel_copy_bo(local_batch, bo_2, bo_1, width * height);
+ intel_bb_blt_copy(local_ibb, buf_1, 0, 0, 4096, buf_2, 0, 0, 4096,
+ 4096/4, (width * height)/4096, 32);
usleep(0); /* let someone else claim the mutex */
/* Compare BOs. If prime_sync_* were executed properly, the caches
@@ -232,16 +242,16 @@ static void blit_and_cmp(void)
igt_fail_on_f(ptr2_cpu[i] != 0x11111111, "Found 0x%08x at offset 0x%08x\n", ptr2_cpu[i], i);
prime_sync_end(dma_buf2_fd, false);
- drm_intel_bo_unreference(bo_1);
- drm_intel_bo_unreference(bo_2);
+ intel_buf_destroy(buf_1);
+ intel_buf_destroy(buf_2);
munmap(ptr_cpu, width * height);
munmap(ptr2_cpu, width * height);
close(dma_buf_fd);
close(dma_buf2_fd);
- intel_batchbuffer_free(local_batch);
- drm_intel_bufmgr_destroy(local_bufmgr);
+ intel_bb_destroy(local_ibb);
+ buf_ops_destroy(local_bops);
close(local_fd);
}
@@ -286,8 +296,8 @@ igt_main
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ bops = buf_ops_create(fd);
+ ibb = intel_bb_create(fd, 4096);
}
/* Cache coherency and the eviction are pretty much unpredictable, so
@@ -315,8 +325,8 @@ igt_main
}
igt_fixture {
- intel_batchbuffer_free(batch);
- drm_intel_bufmgr_destroy(bufmgr);
+ intel_bb_destroy(ibb);
+ buf_ops_destroy(bops);
close(fd);
}
--
2.20.1