[igt-dev] [PATCH i-g-t v13 18/31] tests/gem|kms: Remove intel_bb from fixture
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Mon Jan 11 10:24:59 UTC 2021
Because intel_bb "opens" a connection to the allocator, a completed test
(especially a failed one) can leave the allocator in an unknown state.
Since igt_core now resets the allocator infrastructure between tests, the
connection held inside an intel_bb created in the fixture is no longer
valid afterwards, and trying to reuse it leads to catastrophic errors.
Migrate intel_bb out of the fixture and create it inside each test
individually.
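
In practice the change boils down to the pattern sketched below. This is a
minimal illustration only: the headers, PAGE_SIZE value, fd handling and the
subtest name are assumptions, and only the placement of intel_bb_create() /
intel_bb_destroy() mirrors what the diff does in the affected tests.

/*
 * Sketch: create/destroy intel_bb per subtest instead of in the shared
 * fixture, so each subtest holds its own allocator connection and releases
 * it before igt_core resets the allocator infrastructure.
 */
#include "igt.h"
#include "intel_batchbuffer.h"

#define PAGE_SIZE 4096

igt_main
{
	int fd = -1;
	struct intel_bb *ibb;

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);
		/* No intel_bb_create() here anymore. */
	}

	igt_subtest("example") {
		/* Each subtest opens its own allocator connection ... */
		ibb = intel_bb_create(fd, PAGE_SIZE);

		/* ... uses it for its batches ... */

		/* ... and closes it before the allocator is reset. */
		intel_bb_destroy(ibb);
	}

	igt_fixture {
		/* Only non-intel_bb resources remain in the teardown. */
		close(fd);
	}
}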
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
tests/i915/gem_caching.c | 14 ++++++++--
tests/i915/gem_partial_pwrite_pread.c | 40 +++++++++++++++++----------
tests/i915/gem_render_copy.c | 31 ++++++++++-----------
tests/kms_big_fb.c | 12 +++++---
4 files changed, 61 insertions(+), 36 deletions(-)
diff --git a/tests/i915/gem_caching.c b/tests/i915/gem_caching.c
index bdaff68a0..4e844952f 100644
--- a/tests/i915/gem_caching.c
+++ b/tests/i915/gem_caching.c
@@ -158,7 +158,6 @@ igt_main
flags = 0;
}
data.bops = buf_ops_create(data.fd);
- ibb = intel_bb_create(data.fd, PAGE_SIZE);
scratch_buf = intel_buf_create(data.bops, BO_SIZE/4, 1,
32, 0, I915_TILING_NONE, 0);
@@ -174,6 +173,8 @@ igt_main
igt_info("checking partial reads\n");
+ ibb = intel_bb_create(data.fd, PAGE_SIZE);
+
for (i = 0; i < ROUNDS; i++) {
uint8_t val0 = i;
int start, len;
@@ -195,11 +196,15 @@ igt_main
igt_progress("partial reads test: ", i, ROUNDS);
}
+
+ intel_bb_destroy(ibb);
}
igt_subtest("writes") {
igt_require(flags & TEST_WRITE);
+ ibb = intel_bb_create(data.fd, PAGE_SIZE);
+
igt_info("checking partial writes\n");
for (i = 0; i < ROUNDS; i++) {
@@ -240,11 +245,15 @@ igt_main
igt_progress("partial writes test: ", i, ROUNDS);
}
+
+ intel_bb_destroy(ibb);
}
igt_subtest("read-writes") {
igt_require((flags & TEST_BOTH) == TEST_BOTH);
+ ibb = intel_bb_create(data.fd, PAGE_SIZE);
+
igt_info("checking partial writes after partial reads\n");
for (i = 0; i < ROUNDS; i++) {
@@ -307,10 +316,11 @@ igt_main
igt_progress("partial read/writes test: ", i, ROUNDS);
}
+
+ intel_bb_destroy(ibb);
}
igt_fixture {
- intel_bb_destroy(ibb);
intel_buf_destroy(scratch_buf);
intel_buf_destroy(staging_buf);
buf_ops_destroy(data.bops);
diff --git a/tests/i915/gem_partial_pwrite_pread.c b/tests/i915/gem_partial_pwrite_pread.c
index 72c33539d..c2ca561e3 100644
--- a/tests/i915/gem_partial_pwrite_pread.c
+++ b/tests/i915/gem_partial_pwrite_pread.c
@@ -53,7 +53,6 @@ IGT_TEST_DESCRIPTION("Test pwrite/pread consistency when touching partial"
#define PAGE_SIZE 4096
#define BO_SIZE (4*4096)
-struct intel_bb *ibb;
struct intel_buf *scratch_buf;
struct intel_buf *staging_buf;
@@ -77,7 +76,8 @@ static void *__try_gtt_map_first(data_t *data, struct intel_buf *buf,
return ptr;
}
-static void copy_bo(struct intel_buf *src, struct intel_buf *dst)
+static void copy_bo(struct intel_bb *ibb,
+ struct intel_buf *src, struct intel_buf *dst)
{
bool has_64b_reloc;
@@ -109,8 +109,8 @@ static void copy_bo(struct intel_buf *src, struct intel_buf *dst)
}
static void
-blt_bo_fill(data_t *data, struct intel_buf *tmp_bo,
- struct intel_buf *bo, uint8_t val)
+blt_bo_fill(data_t *data, struct intel_bb *ibb,
+ struct intel_buf *tmp_bo, struct intel_buf *bo, uint8_t val)
{
uint8_t *gtt_ptr;
int i;
@@ -124,7 +124,7 @@ blt_bo_fill(data_t *data, struct intel_buf *tmp_bo,
igt_drop_caches_set(data->drm_fd, DROP_BOUND);
- copy_bo(tmp_bo, bo);
+ copy_bo(ibb, tmp_bo, bo);
}
#define MAX_BLT_SIZE 128
@@ -139,14 +139,17 @@ static void get_range(int *start, int *len)
static void test_partial_reads(data_t *data)
{
+ struct intel_bb *ibb;
int i, j;
+ ibb = intel_bb_create(data->drm_fd, PAGE_SIZE);
+
igt_info("checking partial reads\n");
for (i = 0; i < ROUNDS; i++) {
uint8_t val = i;
int start, len;
- blt_bo_fill(data, staging_buf, scratch_buf, val);
+ blt_bo_fill(data, ibb, staging_buf, scratch_buf, val);
get_range(&start, &len);
gem_read(data->drm_fd, scratch_buf->handle, start, tmp, len);
@@ -159,26 +162,31 @@ static void test_partial_reads(data_t *data)
igt_progress("partial reads test: ", i, ROUNDS);
}
+
+ intel_bb_destroy(ibb);
}
static void test_partial_writes(data_t *data)
{
+ struct intel_bb *ibb;
int i, j;
uint8_t *gtt_ptr;
+ ibb = intel_bb_create(data->drm_fd, PAGE_SIZE);
+
igt_info("checking partial writes\n");
for (i = 0; i < ROUNDS; i++) {
uint8_t val = i;
int start, len;
- blt_bo_fill(data, staging_buf, scratch_buf, val);
+ blt_bo_fill(data, ibb, staging_buf, scratch_buf, val);
memset(tmp, i + 63, BO_SIZE);
get_range(&start, &len);
gem_write(data->drm_fd, scratch_buf->handle, start, tmp, len);
- copy_bo(scratch_buf, staging_buf);
+ copy_bo(ibb, scratch_buf, staging_buf);
gtt_ptr = __try_gtt_map_first(data, staging_buf, 0);
for (j = 0; j < start; j++) {
@@ -200,19 +208,24 @@ static void test_partial_writes(data_t *data)
igt_progress("partial writes test: ", i, ROUNDS);
}
+
+ intel_bb_destroy(ibb);
}
static void test_partial_read_writes(data_t *data)
{
+ struct intel_bb *ibb;
int i, j;
uint8_t *gtt_ptr;
+ ibb = intel_bb_create(data->drm_fd, PAGE_SIZE);
+
igt_info("checking partial writes after partial reads\n");
for (i = 0; i < ROUNDS; i++) {
uint8_t val = i;
int start, len;
- blt_bo_fill(data, staging_buf, scratch_buf, val);
+ blt_bo_fill(data, ibb, staging_buf, scratch_buf, val);
/* partial read */
get_range(&start, &len);
@@ -226,7 +239,7 @@ static void test_partial_read_writes(data_t *data)
/* Change contents through gtt to make the pread cachelines
* stale. */
val += 17;
- blt_bo_fill(data, staging_buf, scratch_buf, val);
+ blt_bo_fill(data, ibb, staging_buf, scratch_buf, val);
/* partial write */
memset(tmp, i + 63, BO_SIZE);
@@ -234,7 +247,7 @@ static void test_partial_read_writes(data_t *data)
get_range(&start, &len);
gem_write(data->drm_fd, scratch_buf->handle, start, tmp, len);
- copy_bo(scratch_buf, staging_buf);
+ copy_bo(ibb, scratch_buf, staging_buf);
gtt_ptr = __try_gtt_map_first(data, staging_buf, 0);
for (j = 0; j < start; j++) {
@@ -256,6 +269,8 @@ static void test_partial_read_writes(data_t *data)
igt_progress("partial read/writes test: ", i, ROUNDS);
}
+
+ intel_bb_destroy(ibb);
}
static void do_tests(data_t *data, int cache_level, const char *suffix)
@@ -288,8 +303,6 @@ igt_main
data.devid = intel_get_drm_devid(data.drm_fd);
data.bops = buf_ops_create(data.drm_fd);
- ibb = intel_bb_create(data.drm_fd, PAGE_SIZE);
-
/* overallocate the buffers we're actually using because */
scratch_buf = intel_buf_create(data.bops, BO_SIZE/4, 1, 32, 0, I915_TILING_NONE, 0);
staging_buf = intel_buf_create(data.bops, BO_SIZE/4, 1, 32, 0, I915_TILING_NONE, 0);
@@ -303,7 +316,6 @@ igt_main
do_tests(&data, 2, "-display");
igt_fixture {
- intel_bb_destroy(ibb);
intel_buf_destroy(scratch_buf);
intel_buf_destroy(staging_buf);
buf_ops_destroy(data.bops);
diff --git a/tests/i915/gem_render_copy.c b/tests/i915/gem_render_copy.c
index afc490f1a..e48b5b996 100644
--- a/tests/i915/gem_render_copy.c
+++ b/tests/i915/gem_render_copy.c
@@ -58,7 +58,6 @@ typedef struct {
int drm_fd;
uint32_t devid;
struct buf_ops *bops;
- struct intel_bb *ibb;
igt_render_copyfunc_t render_copy;
igt_vebox_copyfunc_t vebox_copy;
} data_t;
@@ -341,6 +340,7 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
enum i915_compression dst_compression,
int flags)
{
+ struct intel_bb *ibb;
struct intel_buf ref, src_tiled, src_ccs, dst_ccs, dst;
struct {
struct intel_buf buf;
@@ -397,6 +397,8 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
src_compressed || dst_compressed)
igt_require(intel_gen(data->devid) >= 9);
+ ibb = intel_bb_create(data->drm_fd, 4096);
+
for (int i = 0; i < num_src; i++)
scratch_buf_init(data, &src[i].buf, WIDTH, HEIGHT, src[i].tiling,
I915_COMPRESSION_NONE);
@@ -456,12 +458,12 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
*/
if (src_mixed_tiled) {
if (dst_compressed)
- data->render_copy(data->ibb,
+ data->render_copy(ibb,
&dst, 0, 0, WIDTH, HEIGHT,
&dst_ccs, 0, 0);
for (int i = 0; i < num_src; i++) {
- data->render_copy(data->ibb,
+ data->render_copy(ibb,
&src[i].buf,
WIDTH/4, HEIGHT/4, WIDTH/2-2, HEIGHT/2-2,
dst_compressed ? &dst_ccs : &dst,
@@ -469,13 +471,13 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
}
if (dst_compressed)
- data->render_copy(data->ibb,
+ data->render_copy(ibb,
&dst_ccs, 0, 0, WIDTH, HEIGHT,
&dst, 0, 0);
} else {
if (src_compression == I915_COMPRESSION_RENDER) {
- data->render_copy(data->ibb,
+ data->render_copy(ibb,
&src_tiled, 0, 0, WIDTH, HEIGHT,
&src_ccs,
0, 0);
@@ -486,7 +488,7 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
"render-src_ccs.bin");
}
} else if (src_compression == I915_COMPRESSION_MEDIA) {
- data->vebox_copy(data->ibb,
+ data->vebox_copy(ibb,
&src_tiled, WIDTH, HEIGHT,
&src_ccs);
if (dump_compressed_src_buf) {
@@ -498,34 +500,34 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
}
if (dst_compression == I915_COMPRESSION_RENDER) {
- data->render_copy(data->ibb,
+ data->render_copy(ibb,
src_compressed ? &src_ccs : &src_tiled,
0, 0, WIDTH, HEIGHT,
&dst_ccs,
0, 0);
- data->render_copy(data->ibb,
+ data->render_copy(ibb,
&dst_ccs,
0, 0, WIDTH, HEIGHT,
&dst,
0, 0);
} else if (dst_compression == I915_COMPRESSION_MEDIA) {
- data->vebox_copy(data->ibb,
+ data->vebox_copy(ibb,
src_compressed ? &src_ccs : &src_tiled,
WIDTH, HEIGHT,
&dst_ccs);
- data->vebox_copy(data->ibb,
+ data->vebox_copy(ibb,
&dst_ccs,
WIDTH, HEIGHT,
&dst);
} else if (force_vebox_dst_copy) {
- data->vebox_copy(data->ibb,
+ data->vebox_copy(ibb,
src_compressed ? &src_ccs : &src_tiled,
WIDTH, HEIGHT,
&dst);
} else {
- data->render_copy(data->ibb,
+ data->render_copy(ibb,
src_compressed ? &src_ccs : &src_tiled,
0, 0, WIDTH, HEIGHT,
&dst,
@@ -572,8 +574,7 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
for (int i = 0; i < num_src; i++)
scratch_buf_fini(&src[i].buf);
- /* handles gone, need to clean the objects cache within intel_bb */
- intel_bb_reset(data->ibb, true);
+ intel_bb_destroy(ibb);
}
static int opt_handler(int opt, int opt_index, void *data)
@@ -796,7 +797,6 @@ igt_main_args("dac", NULL, help_str, opt_handler, NULL)
data.vebox_copy = igt_get_vebox_copyfunc(data.devid);
data.bops = buf_ops_create(data.drm_fd);
- data.ibb = intel_bb_create(data.drm_fd, 4096);
igt_fork_hang_detector(data.drm_fd);
}
@@ -849,7 +849,6 @@ igt_main_args("dac", NULL, help_str, opt_handler, NULL)
igt_fixture {
igt_stop_hang_detector();
- intel_bb_destroy(data.ibb);
buf_ops_destroy(data.bops);
}
}
diff --git a/tests/kms_big_fb.c b/tests/kms_big_fb.c
index 8794ace08..19e731902 100644
--- a/tests/kms_big_fb.c
+++ b/tests/kms_big_fb.c
@@ -662,7 +662,6 @@ igt_main
data.render_copy = igt_get_render_copyfunc(data.devid);
data.bops = buf_ops_create(data.drm_fd);
- data.ibb = intel_bb_create(data.drm_fd, 4096);
}
/*
@@ -675,7 +674,9 @@ igt_main
igt_subtest_f("%s-addfb-size-overflow",
modifiers[i].name) {
data.modifier = modifiers[i].modifier;
+ data.ibb = intel_bb_create(data.drm_fd, 4096);
test_size_overflow(&data);
+ intel_bb_destroy(data.ibb);
}
}
@@ -683,15 +684,18 @@ igt_main
igt_subtest_f("%s-addfb-size-offset-overflow",
modifiers[i].name) {
data.modifier = modifiers[i].modifier;
+ data.ibb = intel_bb_create(data.drm_fd, 4096);
test_size_offset_overflow(&data);
+ intel_bb_destroy(data.ibb);
}
}
for (int i = 0; i < ARRAY_SIZE(modifiers); i++) {
igt_subtest_f("%s-addfb", modifiers[i].name) {
data.modifier = modifiers[i].modifier;
-
+ data.ibb = intel_bb_create(data.drm_fd, 4096);
test_addfb(&data);
+ intel_bb_destroy(data.ibb);
}
}
@@ -709,7 +713,9 @@ igt_main
igt_require(data.format == DRM_FORMAT_C8 ||
igt_fb_supported_format(data.format));
igt_require(igt_display_has_format_mod(&data.display, data.format, data.modifier));
+ data.ibb = intel_bb_create(data.drm_fd, 4096);
test_scanout(&data);
+ intel_bb_destroy(data.ibb);
}
}
@@ -720,8 +726,6 @@ igt_main
igt_fixture {
igt_display_fini(&data.display);
-
- intel_bb_destroy(data.ibb);
buf_ops_destroy(data.bops);
}
}
--
2.26.0