[igt-dev] [PATCH i-g-t v3 07/15] i915/gem_unfence_active_buffers.c: Remove libdrm dependency
Dominik Grzegorzek
dominik.grzegorzek at intel.com
Fri Oct 2 06:54:34 UTC 2020
Use intel_bb / intel_buf to remove the libdrm dependency.
The offsets proposed by the kernel differ between intel_bb and libdrm
because EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set on every object.
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
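Note on the offset difference mentioned above: intel_bb adds every object
to the execbuf with EXEC_OBJECT_SUPPORTS_48B_ADDRESS, so the kernel may
place it anywhere in the 48-bit GTT rather than below 4 GiB as the old
libdrm bufmgr did. A minimal sketch of that flag on a plain execbuf, not
taken from this patch; exec_one_48b is hypothetical and assumes
batch_handle holds a valid batch ending in MI_BATCH_BUFFER_END:

#include "igt.h"	/* pulls in i915_drm.h, gem_execbuf(), to_user_pointer() */

static void exec_one_48b(int fd, uint32_t batch_handle)
{
	/* Mark the object as 48-bit capable, as intel_bb does for every buffer. */
	struct drm_i915_gem_exec_object2 obj = {
		.handle = batch_handle,
		.flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = to_user_pointer(&obj),
		.buffer_count = 1,
	};

	/* The kernel is now free to assign an offset above 4 GiB. */
	gem_execbuf(fd, &execbuf);
}
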
tests/i915/gem_unfence_active_buffers.c | 125 +++++++++---------------
1 file changed, 46 insertions(+), 79 deletions(-)
diff --git a/tests/i915/gem_unfence_active_buffers.c b/tests/i915/gem_unfence_active_buffers.c
index 1e69c70d..dd92c4ab 100644
--- a/tests/i915/gem_unfence_active_buffers.c
+++ b/tests/i915/gem_unfence_active_buffers.c
@@ -51,24 +51,24 @@
#include "drm.h"
#include "i915/gem.h"
#include "igt.h"
-#include "intel_bufmgr.h"
IGT_TEST_DESCRIPTION("Check for use-after-free in the fence stealing code.");
-static drm_intel_bufmgr *bufmgr;
-struct intel_batchbuffer *batch;
-uint32_t devid;
-
#define TEST_SIZE (1024*1024)
#define TEST_STRIDE (4*1024)
+#define TEST_HEIGHT (TEST_SIZE/TEST_STRIDE)
+#define TEST_WIDTH (TEST_STRIDE/4)
uint32_t data[TEST_SIZE/4];
igt_simple_main
{
- int i, ret, fd, num_fences;
- drm_intel_bo *busy_bo, *test_bo;
- uint32_t tiling = I915_TILING_X;
+ int i, fd, num_fences;
+ struct intel_bb *ibb;
+ struct buf_ops *bops;
+ struct intel_buf *test_buf;
+ igt_spin_t *busy;
+ uint32_t ring = I915_EXEC_DEFAULT;
for (i = 0; i < 1024*256; i++)
data[i] = i;
@@ -77,86 +77,53 @@ igt_simple_main
igt_require_gem(fd);
gem_require_blitter(fd);
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- devid = intel_get_drm_devid(fd);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
+ bops = buf_ops_create(fd);
+ ibb = intel_bb_create_with_relocs(fd, 4096);
igt_info("filling ring\n");
- busy_bo = drm_intel_bo_alloc(bufmgr, "busy bo bo", 16*1024*1024, 4096);
-
- for (i = 0; i < 250; i++) {
- BLIT_COPY_BATCH_START(0);
- OUT_BATCH((3 << 24) | /* 32 bits */
- (0xcc << 16) | /* copy ROP */
- 2*1024*4);
- OUT_BATCH(0 << 16 | 1024);
- OUT_BATCH((2048) << 16 | (2048));
- OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
- OUT_BATCH(0 << 16 | 0);
- OUT_BATCH(2*1024*4);
- OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
- ADVANCE_BATCH();
-
- if (batch->gen >= 6) {
- BEGIN_BATCH(3, 0);
- OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
- OUT_BATCH(0);
- OUT_BATCH(0);
- ADVANCE_BATCH();
- }
- }
- intel_batchbuffer_flush(batch);
+ if (HAS_BLT_RING(ibb->devid))
+ ring = I915_EXEC_BLT;
+ busy = igt_spin_new(fd, .ctx = gem_context_create(fd), .engine = ring);
num_fences = gem_available_fences(fd);
igt_info("creating havoc on %i fences\n", num_fences);
for (i = 0; i < num_fences*2; i++) {
- test_bo = drm_intel_bo_alloc(bufmgr, "test_bo",
- TEST_SIZE, 4096);
- ret = drm_intel_bo_set_tiling(test_bo, &tiling, TEST_STRIDE);
- igt_assert(ret == 0);
-
- drm_intel_bo_disable_reuse(test_bo);
-
- BLIT_COPY_BATCH_START(0);
- OUT_BATCH((3 << 24) | /* 32 bits */
+ test_buf = intel_buf_create(bops, TEST_WIDTH, TEST_HEIGHT,
+ 32, 0, I915_TILING_X,
+ I915_COMPRESSION_NONE);
+ igt_assert(TEST_STRIDE == test_buf->surface[0].stride);
+
+ intel_bb_add_intel_buf(ibb, test_buf, true);
+ intel_bb_blit_start(ibb, 0);
+ intel_bb_out(ibb, (3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
TEST_STRIDE);
- OUT_BATCH(0 << 16 | 0);
- OUT_BATCH((1) << 16 | (1));
- OUT_RELOC_FENCED(test_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
- OUT_BATCH(0 << 16 | 0);
- OUT_BATCH(TEST_STRIDE);
- OUT_RELOC_FENCED(test_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
- ADVANCE_BATCH();
- intel_batchbuffer_flush(batch);
- igt_info("test bo offset: %#lx\n", test_bo->offset);
-
- drm_intel_bo_unreference(test_bo);
+ intel_bb_out(ibb, 0 << 16 | 0);
+ intel_bb_out(ibb, (1) << 16 | (1));
+ intel_bb_emit_reloc_fenced(ibb, test_buf->handle,
+ I915_GEM_DOMAIN_RENDER,
+ I915_GEM_DOMAIN_RENDER, 0, 0x0);
+ intel_bb_out(ibb, 0 << 16 | 0);
+ intel_bb_out(ibb, TEST_STRIDE);
+ intel_bb_emit_reloc_fenced(ibb, test_buf->handle,
+ I915_GEM_DOMAIN_RENDER, 0, 0, 0x0);
+
+ intel_bb_flush_blit(ibb);
+ igt_info("test bo offset: %#lx\n",
+ intel_bb_get_object_offset(ibb, test_buf->handle));
+
+ intel_buf_destroy(test_buf);
+ intel_bb_reset(ibb, true);
}
- /* launch a few batchs to ensure the damaged slab objects get reused. */
- for (i = 0; i < 10; i++) {
- BLIT_COPY_BATCH_START(0);
- OUT_BATCH((3 << 24) | /* 32 bits */
- (0xcc << 16) | /* copy ROP */
- 2*1024*4);
- OUT_BATCH(0 << 16 | 1024);
- OUT_BATCH((1) << 16 | (1));
- OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
- OUT_BATCH(0 << 16 | 0);
- OUT_BATCH(2*1024*4);
- OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
- ADVANCE_BATCH();
-
- if (batch->gen >= 8) {
- BEGIN_BATCH(3, 0);
- OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
- OUT_BATCH(0);
- OUT_BATCH(0);
- ADVANCE_BATCH();
- }
- }
- intel_batchbuffer_flush(batch);
+ /* launch a few batches to ensure the damaged slab objects get reused. */
+ igt_spin_end(busy);
+ for (i = 0; i < 10; i++)
+ gem_execbuf(fd, &busy->execbuf);
+
+ gem_context_destroy(fd, busy->execbuf.rsvd1);
+ igt_spin_free(fd, busy);
+ intel_bb_destroy(ibb);
+ buf_ops_destroy(bops);
}
--
2.20.1