[Intel-gfx] [PATCH I-g-t V2 1/2] tests: Add a ring sync test based on multiple drm fds to test ring semaphore sync across multiple BSD rings

Zhao Yakui yakui.zhao at intel.com
Wed Apr 23 03:13:19 CEST 2014


On Tue, 2014-04-22 at 13:44 -0600, Daniel Vetter wrote:
> On Tue, Apr 22, 2014 at 02:52:04PM +0300, Imre Deak wrote:
> > On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> > > The Broadwell GT3 machine has two independent BSD rings in the kernel driver,
> > > but they are transparent to the user-space driver. In this case we need to
> > > check the ring sync between the two BSD rings, as well as the sync between
> > > the second BSD ring and the other rings.
> > > 
> > > Signed-off-by: Zhao Yakui <yakui.zhao at intel.com>
> > > ---
> > >  tests/Makefile.sources          |    1 +
> > >  tests/gem_multi_bsd_sync_loop.c |  172 +++++++++++++++++++++++++++++++++++++++
> > >  2 files changed, 173 insertions(+)
> > >  create mode 100644 tests/gem_multi_bsd_sync_loop.c
> > > 
> > > diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> > > index c957ace..7cd9ca8 100644
> > > --- a/tests/Makefile.sources
> > > +++ b/tests/Makefile.sources
> > > @@ -105,6 +105,7 @@ TESTS_progs = \
> > >  	gem_render_tiled_blits \
> > >  	gem_ring_sync_copy \
> > >  	gem_ring_sync_loop \
> > > +	gem_multi_bsd_sync_loop \
> > >  	gem_seqno_wrap \
> > >  	gem_set_tiling_vs_gtt \
> > >  	gem_set_tiling_vs_pwrite \
> > > diff --git a/tests/gem_multi_bsd_sync_loop.c b/tests/gem_multi_bsd_sync_loop.c
> > > new file mode 100644
> > > index 0000000..7f5b832
> > > --- /dev/null
> > > +++ b/tests/gem_multi_bsd_sync_loop.c
> > > @@ -0,0 +1,172 @@
> > > +/*
> > > + * Copyright © 2014 Intel Corporation
> > > + *
> > > + * Permission is hereby granted, free of charge, to any person obtaining a
> > > + * copy of this software and associated documentation files (the "Software"),
> > > + * to deal in the Software without restriction, including without limitation
> > > + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> > > + * and/or sell copies of the Software, and to permit persons to whom the
> > > + * Software is furnished to do so, subject to the following conditions:
> > > + *
> > > + * The above copyright notice and this permission notice (including the next
> > > + * paragraph) shall be included in all copies or substantial portions of the
> > > + * Software.
> > > + *
> > > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> > > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> > > + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> > > + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> > > + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> > > + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> > > + * IN THE SOFTWARE.
> > > + *
> > > + * Authors:
> > > + *    Daniel Vetter <daniel.vetter at ffwll.ch> (based on gem_ring_sync_loop_*.c)
> > > + *    Zhao Yakui <yakui.zhao at intel.com>
> > > + *
> > > + */
> > > +
> > > +#include <stdlib.h>
> > > +#include <stdio.h>
> > > +#include <string.h>
> > > +#include <fcntl.h>
> > > +#include <inttypes.h>
> > > +#include <errno.h>
> > > +#include <sys/stat.h>
> > > +#include <sys/time.h>
> > > +#include "drm.h"
> > > +#include "ioctl_wrappers.h"
> > > +#include "drmtest.h"
> > > +#include "intel_bufmgr.h"
> > > +#include "intel_batchbuffer.h"
> > > +#include "intel_io.h"
> > > +#include "i830_reg.h"
> > > +#include "intel_chipset.h"
> > > +
> > > +static drm_intel_bufmgr *bufmgr;
> > > +struct intel_batchbuffer *batch;
> > > +static drm_intel_bo *target_buffer;
> > > +
> > > +#define NUM_FD	50
> > > +
> > > +static int mfd[NUM_FD];
> > > +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> > > +static struct intel_batchbuffer *mbatch[NUM_FD];
> > > +static drm_intel_bo *mbuffer[NUM_FD];
> > > +
> > > +
> > > +/*
> > > + * Testcase: Basic check of ring<->ring sync using a dummy reloc
> > > + *
> > > + * Extremely efficient at catching missed irqs with semaphores=0 ...
> > > + */
> > > +
> > > +#define MI_COND_BATCH_BUFFER_END	(0x36<<23 | 1)
> > > +#define MI_DO_COMPARE			(1<<21)
> > > +
> > > +static void
> > > +store_dword_loop(int fd)
> > > +{
> > > +	int i;
> > > +	int num_rings = gem_get_num_rings(fd);
> > > +
> > > +	srandom(0xdeadbeef);
> > > +
> > > +	for (i = 0; i < SLOW_QUICK(0x100000, 10); i++) {
> > > +		int ring, mindex;
> > > +		ring = random() % num_rings + 1;
> > > +		mindex = random() % NUM_FD;
> > > +		batch = mbatch[mindex];
> > > +		if (ring == I915_EXEC_RENDER) {
> > > +			BEGIN_BATCH(4);
> > > +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> > > +			OUT_BATCH(0xffffffff); /* compare dword */
> > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > +			OUT_BATCH(MI_NOOP);
> > > +			ADVANCE_BATCH();
> > > +		} else {
> > > +			BEGIN_BATCH(4);
> > > +			OUT_BATCH(MI_FLUSH_DW | 1);
> > > +			OUT_BATCH(0); /* reserved */
> > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> > > +			ADVANCE_BATCH();
> > > +		}
> > > +		intel_batchbuffer_flush_on_ring(batch, ring);
> > > +	}
> > > +
> > > +	drm_intel_bo_map(target_buffer, 0);
> > > +	/* map to force waiting on outstanding rendering */
> > > +	drm_intel_bo_unmap(target_buffer);
> > 
> > This test looks the same as dummy_reloc_loop_random_ring_multi_fd() that
> > you add in patch 2/2, except the above two calls. Unless I'm missing
> > something else .. Is there any reason why we don't want to make this
> > also a subtest of gem_dummy_reloc_loop.c to avoid duplicating all the
> > setup here?
> 
> Historical accident since for the other rings we also have this
> duplication between inter-ring sync tests and ring/cpu sync tests with
> dummy relocs. I don't mind really ;-)

Hi, Imre

    So based on Daniel's reply, we will keep this as a separate test case
for the inter-ring sync test.
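
The reason the loop simply scatters work across random fds and rings is that
the second BSD ring is never addressable from user space: all a client can
request is I915_EXEC_BSD, and on Broadwell GT3 the kernel decides which of the
two VCS engines services the batch. Below is a minimal sketch of such a
submission, reusing only helpers the patch itself already uses; it is
illustrative only and not part of the patch.

#include <unistd.h>
#include "drm.h"
#include "ioctl_wrappers.h"
#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_io.h"
#include "i830_reg.h"
#include "intel_chipset.h"

/* BEGIN_BATCH()/OUT_BATCH() expand against a variable with this name. */
static struct intel_batchbuffer *batch;

igt_simple_main
{
	int fd = drm_open_any();
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

	igt_assert(bufmgr);
	gem_require_ring(fd, I915_EXEC_BSD);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
	igt_assert(batch);

	/* A do-nothing batch; the only interesting part is the ring flag. */
	BEGIN_BATCH(2);
	OUT_BATCH(MI_NOOP);
	OUT_BATCH(MI_NOOP);
	ADVANCE_BATCH();

	/*
	 * I915_EXEC_BSD is all user space can say; which physical BSD ring
	 * executes the batch on GT3 is chosen inside the kernel.
	 */
	intel_batchbuffer_flush_on_ring(batch, I915_EXEC_BSD);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
}

Because nothing in the interface lets the test pin work to one particular BSD
ring, spreading submissions over many fds and rings is the practical way to
get batches onto both rings and exercise the inter-ring synchronisation paths.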

Anyway, thanks for your comment.

Thanks.
    Yakui
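
The per-fd setup that Imre notes is duplicated from gem_dummy_reloc_loop.c
boils down to flink-sharing a single buffer object across independent drm fds,
so that a dummy reloc against it from any fd and ring forces the kernel to
order the batches. A minimal standalone sketch of just that sharing step,
using only libdrm calls that already appear in the patch (names and the check
pattern here are illustrative):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include "drm.h"
#include "ioctl_wrappers.h"
#include "drmtest.h"
#include "intel_bufmgr.h"

igt_simple_main
{
	int fd1 = drm_open_any();
	int fd2 = drm_open_any();
	drm_intel_bufmgr *mgr1 = drm_intel_bufmgr_gem_init(fd1, 4096);
	drm_intel_bufmgr *mgr2 = drm_intel_bufmgr_gem_init(fd2, 4096);
	drm_intel_bo *bo1, *bo2;
	uint32_t name;

	igt_assert(mgr1 && mgr2);

	/* Allocate on the first fd and publish a global (flink) name. */
	bo1 = drm_intel_bo_alloc(mgr1, "shared target", 4096, 4096);
	igt_assert(bo1);
	igt_assert(drm_intel_bo_flink(bo1, &name) == 0);

	/* Open the very same object through the second, independent fd. */
	bo2 = drm_intel_bo_gem_create_from_name(mgr2, "shared target on fd2", name);
	igt_assert(bo2);

	/* Same backing storage: a CPU write through fd1 is seen through fd2. */
	drm_intel_bo_map(bo1, 1);
	memset(bo1->virtual, 0xaa, 4096);
	drm_intel_bo_unmap(bo1);

	drm_intel_bo_map(bo2, 0);
	igt_assert(((uint8_t *)bo2->virtual)[0] == 0xaa);
	drm_intel_bo_unmap(bo2);

	drm_intel_bo_unreference(bo2);
	drm_intel_bo_unreference(bo1);
	drm_intel_bufmgr_destroy(mgr2);
	drm_intel_bufmgr_destroy(mgr1);
	close(fd2);
	close(fd1);
}

Because every fd resolves the flink name to the same backing object, the write
domains in the dummy relocs are enough to make the kernel serialise batches
across rings, which is what store_dword_loop() relies on.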

> -Daniel
> 
> > 
> > --Imre
> > 
> > 
> > > +}
> > > +
> > > +igt_simple_main
> > > +{
> > > +	int fd;
> > > +	int devid;
> > > +	int i;
> > > +
> > > +	fd = drm_open_any();
> > > +	devid = intel_get_drm_devid(fd);
> > > +	gem_require_ring(fd, I915_EXEC_BLT);
> > > +
> > > +
> > > +	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
> > > +	igt_assert(bufmgr);
> > > +	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
> > > +
> > > +
> > > +	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> > > +	igt_assert(target_buffer);
> > > +
> > > +	/* Create multiple drm fds and share one gem object among them via flink */
> > > +	{
> > > +		unsigned int target_flink;
> > > +		char buffer_name[32];
> > > +		if (dri_bo_flink(target_buffer, &target_flink)) {
> > > +			printf("failed to get flink name for target buffer\n");
> > > +			igt_assert(0);
> > > +			goto fail_flink;
> > > +		}
> > > +		for (i = 0; i < NUM_FD; i++) {
> > > +			mfd[i] = 0;
> > > +			mbufmgr[i] = NULL;
> > > +			mbuffer[i] = NULL;
> > > +		}
> > > +		for (i = 0; i < NUM_FD; i++) {
> > > +			sprintf(buffer_name, "Target buffer %d", i);
> > > +			mfd[i] = drm_open_any();
> > > +			mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> > > +			igt_assert(mbufmgr[i]);
> > > +			drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> > > +			mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> > > +			igt_assert(mbatch[i]);
> > > +			mbuffer[i] = intel_bo_gem_create_from_name(mbufmgr[i], buffer_name, target_flink);
> > > +			igt_assert(mbuffer[i]);
> > > +		}
> > > +	}
> > > +
> > > +	store_dword_loop(fd);
> > > +
> > > +	{
> > > +		for (i = 0; i < NUM_FD; i++) {
> > > +			dri_bo_unreference(mbuffer[i]);
> > > +			intel_batchbuffer_free(mbatch[i]);
> > > +			drm_intel_bufmgr_destroy(mbufmgr[i]);
> > > +			close(mfd[i]);
> > > +		}
> > > +	}
> > > +	drm_intel_bo_unreference(target_buffer);
> > > +	drm_intel_bufmgr_destroy(bufmgr);
> > > +
> > > +	close(fd);
> > > +	return;
> > > +
> > > +fail_flink:
> > > +	drm_intel_bo_unreference(target_buffer);
> > > +	drm_intel_bufmgr_destroy(bufmgr);
> > > +
> > > +	close(fd);
> > > +}
> > 
> 
> 
> 