[igt-dev] [PATCH i-g-t v5 2/2] tests: add slice power programming test

Kelvin Gardiner kelvin.gardiner at intel.com
Thu May 24 17:36:13 UTC 2018



On 24/05/18 07:58, Lionel Landwerlin wrote:
> Verifies that the kernel programs slices correctly by reading back
> the value of the PWR_CLK_STATE register, or via MI_SET_PREDICATE on
> platforms before Cannonlake.
> 
> v2: Add subslice tests (Lionel)
>      Use MI_SET_PREDICATE for further verification when available (Lionel)
> 
> v3: Rename to gem_ctx_rpcs (Lionel)
> 
> v4: Update kernel API (Lionel)
>      Add 0 value test (Lionel)
>      Exercise invalid values (Lionel)
> 
> v5: Add perf tests (Lionel)
> 
> v6: Add new sysfs entry tests (Lionel)
> 
> v7: Test rsvd fields
>      Update for kernel series changes
> 
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
> ---
>   tests/Makefile.sources |    1 +
>   tests/gem_ctx_param.c  |    4 +-
>   tests/gem_ctx_sseu.c   | 1017 ++++++++++++++++++++++++++++++++++++++++
>   tests/meson.build      |    1 +
>   4 files changed, 1022 insertions(+), 1 deletion(-)
>   create mode 100644 tests/gem_ctx_sseu.c
> 
> diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> index ad62611f..fd44b720 100644
> --- a/tests/Makefile.sources
> +++ b/tests/Makefile.sources
> @@ -60,6 +60,7 @@ TESTS_progs = \
>   	gem_ctx_exec \
>   	gem_ctx_isolation \
>   	gem_ctx_param \
> +	gem_ctx_sseu \
>   	gem_ctx_switch \
>   	gem_ctx_thrash \
>   	gem_double_irq_loop \
> diff --git a/tests/gem_ctx_param.c b/tests/gem_ctx_param.c
> index c46fd709..af1afeaa 100644
> --- a/tests/gem_ctx_param.c
> +++ b/tests/gem_ctx_param.c
> @@ -294,11 +294,13 @@ igt_main
>   			set_priority(fd);
>   	}
>   
> +	/* I915_CONTEXT_PARAM_SSEU tests are located in gem_ctx_sseu.c */
> +
>   	/* NOTE: This testcase intentionally tests for the next free parameter
>   	 * to catch ABI extensions. Don't "fix" this testcase without adding all
>   	 * the tests for the new param first.
>   	 */
> -	arg.param = I915_CONTEXT_PARAM_PRIORITY + 1;
> +	arg.param = I915_CONTEXT_PARAM_SSEU + 1;
>   
>   	igt_subtest("invalid-param-get") {
>   		arg.ctx_id = ctx;
> diff --git a/tests/gem_ctx_sseu.c b/tests/gem_ctx_sseu.c
> new file mode 100644
> index 00000000..0c239e46
> --- /dev/null
> +++ b/tests/gem_ctx_sseu.c
> @@ -0,0 +1,1017 @@
> +/*
> + * Copyright © 2017 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + * Authors:
> + *    Lionel Landwerlin <lionel.g.landwerlin at intel.com>
> + *
> + */
> +
> +#include "igt.h"
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <signal.h>
> +#include <errno.h>
> +#include <time.h>
> +#include <sys/wait.h>
> +
> +#include "intel_bufmgr.h"
> +#include "igt_sysfs.h"
> +
> +IGT_TEST_DESCRIPTION("Test context render powergating programming.");
> +
> +#define MI_STORE_REGISTER_MEM (0x24 << 23)
> +
> +#define MI_SET_PREDICATE      (0x1 << 23)
> +#define  MI_SET_PREDICATE_NOOP_NEVER (0)
> +#define  MI_SET_PREDICATE_1_SLICES   (5)
> +#define  MI_SET_PREDICATE_2_SLICES   (6)
> +#define  MI_SET_PREDICATE_3_SLICES   (7)
> +
> +#define GEN8_R_PWR_CLK_STATE		0x20C8
> +#define   GEN8_RPCS_ENABLE		(1 << 31)
> +#define   GEN8_RPCS_S_CNT_ENABLE	(1 << 18)
> +#define   GEN8_RPCS_S_CNT_SHIFT		15
> +#define   GEN8_RPCS_S_CNT_MASK		(0x7 << GEN8_RPCS_S_CNT_SHIFT)
> +#define   GEN8_RPCS_SS_CNT_ENABLE	(1 << 11)
> +#define   GEN8_RPCS_SS_CNT_SHIFT	8
> +#define   GEN8_RPCS_SS_CNT_MASK		(0x7 << GEN8_RPCS_SS_CNT_SHIFT)
> +#define   GEN8_RPCS_EU_MAX_SHIFT	4
> +#define   GEN8_RPCS_EU_MAX_MASK		(0xf << GEN8_RPCS_EU_MAX_SHIFT)
> +#define   GEN8_RPCS_EU_MIN_SHIFT	0
> +#define   GEN8_RPCS_EU_MIN_MASK		(0xf << GEN8_RPCS_EU_MIN_SHIFT)
> +
> +#define RCS_TIMESTAMP (0x2000 + 0x358)
> +
> +static int drm_fd;
> +static int devid;
> +static uint64_t device_slice_mask = 0;
> +static uint64_t device_subslice_mask = 0;
> +static uint32_t device_slice_count = 0;
> +static uint32_t device_subslice_count = 0;
> +
> +static uint64_t mask_minus_one(uint64_t mask)
> +{
> +	int i;
> +
> +	for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
> +		if ((1UL << i) & mask) {
> +			return mask & ~(1UL << i);
> +		}
> +	}
> +
> +	igt_assert(!"reached");
> +	return 0;
> +}
> +
> +static uint64_t mask_plus_one(uint64_t mask)
> +{
> +	int i;
> +
> +	for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
> +		if (((1UL << i) & mask) == 0) {
> +			return mask | (1UL << i);
> +		}
> +	}
> +
> +	igt_assert(!"reached");
> +	return 0;
> +}
> +
> +static uint64_t mask_minus(uint64_t mask, int n)
> +{
> +	int i;
> +
> +	for (i = 0; i < n; i++)
> +		mask = mask_minus_one(mask);
> +
> +	return mask;
> +}
> +
> +static uint64_t mask_plus(uint64_t mask, int n)
> +{
> +	int i;
> +
> +	for (i = 0; i < n; i++)
> +		mask = mask_plus_one(mask);
> +
> +	return mask;
> +}
> +
> +static uint32_t
> +read_rpcs_reg(drm_intel_bufmgr *bufmgr,
> +	      drm_intel_context *context,
> +	      uint32_t expected_slices)
> +{
> +	struct intel_batchbuffer *batch;
> +	drm_intel_bo *dst_bo;
> +	uint32_t rpcs;
> +	unsigned dwords = 3 + 3, relocs = 1 + 1;
> +	int ret;
> +
> +	batch = intel_batchbuffer_alloc(bufmgr, devid);
> +	igt_assert(batch);
> +
> +	intel_batchbuffer_set_context(batch, context);
> +
> +	dst_bo = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> +	igt_assert(dst_bo);
> +
> +	/* Clear destination buffer. */
> +	ret = drm_intel_bo_map(dst_bo, true /* write enable */);
> +	igt_assert_eq(ret, 0);
> +	memset(dst_bo->virtual, 0, dst_bo->size);
> +	drm_intel_bo_unmap(dst_bo);
> +
> +	/*
> +	 * Prior to Gen10 we can use the predicate to further verify
> +	 * that the hardware has been programmed correctly.
> +	 */
> +	if (expected_slices != 0 && intel_gen(devid) < 10) {
> +		BEGIN_BATCH(dwords + 2, relocs);
> +		OUT_BATCH(MI_SET_PREDICATE | (1 - 1) |
> +			  (MI_SET_PREDICATE_1_SLICES + expected_slices - 1));
> +	} else {
> +		BEGIN_BATCH(dwords, relocs);
> +	}
> +
> +	OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
> +	OUT_BATCH(RCS_TIMESTAMP);
> +	OUT_RELOC(dst_bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
> +
> +	OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
> +	OUT_BATCH(GEN8_R_PWR_CLK_STATE);
> +	OUT_RELOC(dst_bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 4);
> +
> +	if (expected_slices != 0 && intel_gen(devid) < 10)
> +		OUT_BATCH(MI_SET_PREDICATE | (1 - 1) | MI_SET_PREDICATE_NOOP_NEVER);
> +
> +	ADVANCE_BATCH();
> +
> +	intel_batchbuffer_flush_on_ring(batch, I915_EXEC_RENDER);
> +
> +	drm_intel_bo_wait_rendering(dst_bo);
> +
> +	ret = drm_intel_bo_map(dst_bo, false /* no write */);
> +	igt_assert_eq(ret, 0);
> +
> +	uint32_t *ts = (uint32_t *) (dst_bo->virtual);
> +
> +	rpcs = *((uint32_t *) (dst_bo->virtual + 4));
> +
> +	igt_debug("rcs_timestamp=0x%x rpcs=0x%x\n", *ts, rpcs);
> +
> +	drm_intel_bo_unmap(dst_bo);
> +
> +	drm_intel_bo_unreference(dst_bo);
> +
> +	intel_batchbuffer_free(batch);
> +
> +	return rpcs;
> +}
> +
> +static uint32_t
> +read_slice_count(drm_intel_bufmgr *bufmgr,
> +		 drm_intel_context *context,
> +		 uint32_t expected_slice_count)
> +{
> +	return (read_rpcs_reg(bufmgr, context, expected_slice_count) & GEN8_RPCS_S_CNT_MASK)
> +		>> GEN8_RPCS_S_CNT_SHIFT;
> +}
> +
> +static uint32_t
> +read_subslice_count(drm_intel_bufmgr *bufmgr,
> +		    drm_intel_context *context)
> +{
> +	return (read_rpcs_reg(bufmgr, context, 0) & GEN8_RPCS_SS_CNT_MASK)
> +		>> GEN8_RPCS_SS_CNT_SHIFT;
> +}
> +
> +static bool
> +kernel_has_per_context_sseu_support(void)
> +{
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *context;
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu sseu;
> +	uint32_t context_id;
> +	int ret;
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> +	igt_assert(bufmgr);
> +
> +	context = drm_intel_gem_context_create(bufmgr);
> +	igt_assert(context);
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&sseu, 0, sizeof(sseu));
> +	sseu.class = 0; /* rcs */
> +	sseu.instance = 0;
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &sseu;
> +
> +	ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +	drm_intel_gem_context_destroy(context);
> +	drm_intel_bufmgr_destroy(bufmgr);
> +
> +	return ret == 0;
> +}
> +
> +static bool
> +platform_has_per_context_sseu_support(void)
> +{
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *context;
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu sseu;
> +	uint32_t context_id;
> +	int ret;
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> +	igt_assert(bufmgr);
> +
> +	context = drm_intel_gem_context_create(bufmgr);
> +	igt_assert(context);
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&sseu, 0, sizeof(sseu));
> +	sseu.class = 0; /* rcs */
> +	sseu.instance = 0;
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &sseu;
> +
> +	ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +	if (ret == 0)
> +		ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
> +
> +	drm_intel_gem_context_destroy(context);
> +	drm_intel_bufmgr_destroy(bufmgr);
> +
> +	return ret == 0;
> +}
> +
> +static void
> +context_get_sseu_masks(drm_intel_context *context,
> +		       uint32_t *slice_mask,
> +		       uint32_t *subslice_mask)
> +{
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu sseu;
> +	uint32_t context_id;
> +	int ret;
> +
> +	memset(&sseu, 0, sizeof(sseu));
> +	sseu.class = 0; /* rcs */
> +	sseu.instance = 0;
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &sseu;
> +
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +	if (slice_mask)
> +		*slice_mask = sseu.slice_mask;
> +	if (subslice_mask)
> +		*subslice_mask = sseu.subslice_mask;
> +}
> +
> +static void
> +context_set_slice_mask(drm_intel_context *context, uint32_t slice_mask)
> +{
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu sseu;
> +	uint32_t context_id;
> +	int ret;
> +
> +	memset(&sseu, 0, sizeof(sseu));
> +	sseu.class = 0; /* rcs */
> +	sseu.instance = 0;
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &sseu;
> +
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +	sseu.slice_mask = slice_mask;
> +
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
> +}
> +
> +static void
> +context_set_subslice_mask(drm_intel_context *context, uint32_t subslice_mask)
> +{
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu sseu;
> +	uint32_t context_id;
> +	int ret;
> +
> +	memset(&sseu, 0, sizeof(sseu));
> +	sseu.class = 0; /* rcs */
> +	sseu.instance = 0;
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &sseu;
> +
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +	sseu.subslice_mask = subslice_mask;
> +
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
> +}
> +
> +/*
> + * Verify that we can program the slice count.
> + */
> +static void
> +test_sseu_slice_program_gt(uint32_t pg_slice_count)
> +{
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *pg_contexts[2], *df_contexts[2];
> +	uint32_t pg_slice_mask = mask_minus(device_slice_mask, pg_slice_count);
> +	uint32_t slice_count = __builtin_popcount(pg_slice_mask);
> +	uint32_t slice_mask;
> +
> +	igt_debug("Running with %i slices powergated\n", pg_slice_count);
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 10 * 4096);
> +	igt_assert(bufmgr);
> +
> +	pg_contexts[0] = drm_intel_gem_context_create(bufmgr);
> +	pg_contexts[1] = drm_intel_gem_context_create(bufmgr);
> +	df_contexts[0] = drm_intel_gem_context_create(bufmgr);
> +	df_contexts[1] = drm_intel_gem_context_create(bufmgr);
> +
> +	context_set_slice_mask(pg_contexts[0], pg_slice_mask);
> +	context_set_slice_mask(pg_contexts[1], pg_slice_mask);
> +	context_set_slice_mask(df_contexts[0], device_slice_mask);
> +	context_set_slice_mask(df_contexts[1], device_slice_mask);
> +
> +	for (int i = 0; i < ARRAY_SIZE(pg_contexts); i++) {
> +		context_get_sseu_masks(pg_contexts[i], &slice_mask, NULL);
> +		igt_assert_eq(pg_slice_mask, slice_mask);
> +	}
> +
> +	for (int i = 0; i < ARRAY_SIZE(df_contexts); i++) {
> +		context_get_sseu_masks(df_contexts[i], &slice_mask, NULL);
> +		igt_assert_eq(device_slice_mask, slice_mask);
> +	}
> +
> +	/*
> +	 * Test false positives with predicates (only available
> +	 * before Gen10).
> +	 */
> +	if (intel_gen(devid) < 10) {
> +		igt_assert_eq(0, read_slice_count(bufmgr, pg_contexts[0],
> +						  device_slice_count));
> +	}
> +
> +	igt_debug("pg_contexts:\n");
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, pg_contexts[0],
> +						    slice_count));
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, pg_contexts[1],
> +						    slice_count));
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, pg_contexts[0],
> +						    slice_count));
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, pg_contexts[0],
> +						    slice_count));
> +
> +	igt_debug("df_contexts:\n");
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, df_contexts[0],
> +							   device_slice_count));
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, df_contexts[1],
> +							   device_slice_count));
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, df_contexts[0],
> +							   device_slice_count));
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, df_contexts[0],
> +							   device_slice_count));
> +
> +	igt_debug("mixed:\n");
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, pg_contexts[0],
> +						    slice_count));
> +
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, df_contexts[0],
> +							   device_slice_count));
> +
> +
> +	for (int i = 0; i < ARRAY_SIZE(pg_contexts); i++)
> +		drm_intel_gem_context_destroy(pg_contexts[i]);
> +	for (int i = 0; i < ARRAY_SIZE(df_contexts); i++)
> +		drm_intel_gem_context_destroy(df_contexts[i]);
> +
> +	drm_intel_bufmgr_destroy(bufmgr);
> +}
> +
> +/*
> + * Verify that we can program the subslice count.
> + */
> +static void
> +test_sseu_subslice_program_gt(int pg_subslice_count)
> +{
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *context1, *context2;
> +	uint32_t pg_subslice_mask =
> +		mask_minus(device_subslice_mask, pg_subslice_count);
> +	uint32_t subslice_count = __builtin_popcount(pg_subslice_mask);
> +	uint32_t subslice_mask;
> +
> +	igt_debug("Running with %i subslices powergated\n", pg_subslice_count);
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> +	igt_assert(bufmgr);
> +
> +	context1 = drm_intel_gem_context_create(bufmgr);
> +	igt_assert(context1);
> +
> +	context2 = drm_intel_gem_context_create(bufmgr);
> +	igt_assert(context2);
> +
> +	context_set_subslice_mask(context1, pg_subslice_mask);
> +	context_set_subslice_mask(context2, device_subslice_mask);
> +
> +	context_get_sseu_masks(context1, NULL, &subslice_mask);
> +	igt_assert_eq(pg_subslice_mask, subslice_mask);
> +	context_get_sseu_masks(context2, NULL, &subslice_mask);
> +	igt_assert_eq(device_subslice_mask, subslice_mask);
> +
> +	igt_assert_eq(subslice_count, read_subslice_count(bufmgr, context1));
> +	igt_assert_eq(device_subslice_count, read_subslice_count(bufmgr, context2));
> +
> +	context_set_subslice_mask(context1, device_subslice_mask);
> +	context_set_subslice_mask(context2, pg_subslice_mask);
> +
> +	context_get_sseu_masks(context1, NULL, &subslice_mask);
> +	igt_assert_eq(device_subslice_mask, subslice_mask);
> +	context_get_sseu_masks(context2, NULL, &subslice_mask);
> +	igt_assert_eq(pg_subslice_mask, subslice_mask);
> +
> +	igt_assert_eq(device_subslice_count, read_subslice_count(bufmgr, context1));
> +	igt_assert_eq(subslice_count, read_subslice_count(bufmgr, context2));
> +
> +	drm_intel_gem_context_destroy(context1);
> +	drm_intel_gem_context_destroy(context2);
> +
> +	drm_intel_bufmgr_destroy(bufmgr);
> +}
> +
> +/*
> + * Verify that invalid engine class/instance is properly rejected.
> + */
> +static void
> +test_sseu_invalid_engine(void)
> +{
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *context;
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu sseu;
> +	uint32_t context_id;
> +	int ret;
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> +	igt_assert(bufmgr);
> +
> +	context = drm_intel_gem_context_create(bufmgr);
> +	igt_assert(context);
> +
> +	memset(&sseu, 0, sizeof(sseu));
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &sseu;
> +
> +	sseu.class = 0xffff; /* invalid */

For all the invalid values, maybe a boundary test would be better.

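Something along these lines, perhaps (just a sketch reusing the
variables already in scope in this test; it assumes the render
engine, class 0 instance 0, is the only engine this uapi accepts
right now, so 1 is the first out-of-range value for either field):

	/* Probe the first class/instance just past the valid range,
	 * rather than a far-away value like 0xffff. */
	sseu.class = 1; /* first invalid class, if only rcs is valid */
	sseu.instance = 0;
	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg, EINVAL);

	sseu.class = 0;
	sseu.instance = 1; /* first invalid instance of rcs */
	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg, EINVAL);
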
> +	sseu.instance = 0;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg, EINVAL);
> +
> +	sseu.class = 0;
> +	sseu.instance = 0xffff; /* invalid */
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg, EINVAL);
> +
> +	/*
> +	 * Get some proper values before trying to reprogram them onto
> +	 * an invalid engine.
> +	 */
> +	sseu.class = 0;
> +	sseu.instance = 0;
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +
> +	sseu.class = 0xffff; /* invalid */
> +	sseu.instance = 0;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	sseu.class = 0;
> +	sseu.instance = 0xffff; /* invalid */
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	drm_intel_gem_context_destroy(context);
> +	drm_intel_bufmgr_destroy(bufmgr);
> +}
> +
> +/*
> + * Verify that invalid values are rejected.
> + */
> +static void
> +test_sseu_invalid_values(void)
> +{
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *context;
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu default_sseu, sseu;
> +	uint32_t context_id;
> +	int i, ret;
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> +	igt_assert(bufmgr);
> +
> +	context = drm_intel_gem_context_create(bufmgr);
> +	igt_assert(context);
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&default_sseu, 0, sizeof(default_sseu));
> +	default_sseu.class = 0; /* rcs */
> +	default_sseu.instance = 0;
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &default_sseu;
> +
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +	arg.value = (uintptr_t) &sseu;
> +
> +	/* Try non-zero rsvd fields. */
> +	sseu = default_sseu;
> +	sseu.rsvd1 = 1;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	sseu = default_sseu;
> +	sseu.rsvd1 = 0xff00ff00;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	sseu = default_sseu;
> +	sseu.rsvd2 = 1;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	sseu = default_sseu;
> +	sseu.rsvd2 = 0xff00ff00;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	sseu = default_sseu;
> +	sseu.rsvd1 = 42;
> +	sseu.rsvd2 = 42 * 42;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	/* Try all slice masks known to be invalid. */
> +	sseu = default_sseu;
> +	for (i = 1; i <= (8 - device_slice_count); i++) {
> +		sseu.slice_mask = mask_plus(device_slice_mask, i);
> +		do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +	}
> +
> +	/* 0 slices. */
> +	sseu.slice_mask = 0;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	/* Try all subslice masks known to be invalid. */
> +	sseu = default_sseu;
> +	for (i = 1; i <= (8 - device_subslice_count); i++) {
> +		sseu.subslice_mask = mask_plus(device_subslice_mask, i);
> +		do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +	}
> +
> +	/* 0 subslices. */
> +	sseu.subslice_mask = 0;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	/* Try a number of EUs greater than the max available. */
> +	sseu = default_sseu;
> +	sseu.min_eus_per_subslice = default_sseu.max_eus_per_subslice + 1;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	sseu = default_sseu;
> +	sseu.max_eus_per_subslice = default_sseu.max_eus_per_subslice + 1;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	/* Try to program 0 max EUs. */
> +	sseu = default_sseu;
> +	sseu.max_eus_per_subslice = 0;
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
> +
> +	drm_intel_gem_context_destroy(context);
> +	drm_intel_bufmgr_destroy(bufmgr);
> +}
> +
> +/* Verify that the kernel returns the correct error value on Gen < 8. */
> +static void
> +test_no_sseu_support(void)
> +{
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *context;
> +	struct drm_i915_gem_context_param arg;
> +	struct drm_i915_gem_context_param_sseu sseu;
> +	uint32_t context_id;
> +	int ret;
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> +	igt_assert(bufmgr);
> +
> +	context = drm_intel_gem_context_create(bufmgr);
> +	igt_assert(context);
> +
> +	ret = drm_intel_gem_context_get_id(context, &context_id);
> +	igt_assert_eq(ret, 0);
> +
> +	memset(&sseu, 0, sizeof(sseu));
> +	sseu.class = 0; /* rcs */
> +	sseu.instance = 0;
> +
> +	memset(&arg, 0, sizeof(arg));
> +	arg.ctx_id = context_id;
> +	arg.param = I915_CONTEXT_PARAM_SSEU;
> +	arg.value = (uintptr_t) &sseu;
> +
> +	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, ENODEV);
> +
> +	drm_intel_gem_context_destroy(context);
> +	drm_intel_bufmgr_destroy(bufmgr);
> +}
> +
> +static void
> +init_contexts(drm_intel_bufmgr *bufmgr,
> +	      drm_intel_context **contexts,
> +	      int n_contexts,
> +	      uint32_t device_slice_mask,
> +	      uint32_t pg_slice_mask)
> +{
> +	int i;
> +
> +	for (i = 0; i < n_contexts; i++)
> +		contexts[i] = drm_intel_gem_context_create(bufmgr);
> +
> +	context_set_slice_mask(contexts[0], device_slice_mask);
> +	context_set_slice_mask(contexts[1], pg_slice_mask);
> +}
> +
> +/*
> + * Verify that powergating settings are put on hold while i915/perf is
> + * active.
> + */
> +static void
> +test_sseu_perf(void)
> +{
> +	uint64_t properties[] = {
> +		/* Include OA reports in samples */
> +		DRM_I915_PERF_PROP_SAMPLE_OA, true,
> +
> +		/* OA unit configuration */
> +		DRM_I915_PERF_PROP_OA_METRICS_SET, 1, /* test metric */
> +		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
> +		DRM_I915_PERF_PROP_OA_EXPONENT, 20,
> +	};
> +	struct drm_i915_perf_open_param param = {
> +		.flags = I915_PERF_FLAG_FD_CLOEXEC |
> +		I915_PERF_FLAG_FD_NONBLOCK,
> +		.num_properties = ARRAY_SIZE(properties) / 2,
> +		.properties_ptr = to_user_pointer(properties),
> +	};
> +	drm_intel_bufmgr *bufmgr;
> +	drm_intel_context *contexts[2];
> +	uint32_t pg_slice_mask = mask_minus(device_slice_mask, 1);
> +	uint32_t slice_count = __builtin_popcount(pg_slice_mask);
> +	int perf_fd;
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 10 * 4096);
> +	igt_assert(bufmgr);
> +
> +	init_contexts(bufmgr, contexts, 2, device_slice_mask, pg_slice_mask);
> +
> +	/*
> +	 * Test false positives with predicates (only available
> +	 * before Gen10).
> +	 */
> +	if (intel_gen(devid) < 10) {
> +		igt_assert_eq(0, read_slice_count(bufmgr, contexts[1],
> +						  device_slice_count));
> +	}
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, contexts[0],
> +							   device_slice_count));
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, contexts[1],
> +						    slice_count));
> +
> +	/*
> +	 * Now open i915/perf and verify that all contexts have been
> +	 * reconfigured to the device's default.
> +	 */
> +	perf_fd = igt_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
> +	igt_assert(perf_fd >= 0);
> +
> +	if (intel_gen(devid) < 10) {
> +		igt_assert_eq(0, read_slice_count(bufmgr, contexts[1], slice_count));
> +	}
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, contexts[0],
> +							   device_slice_count));
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, contexts[1],
> +							   device_slice_count));
> +
> +	close(perf_fd);
> +
> +	/*
> +	 * After closing the perf stream, configurations should be
> +	 * back to the programmed values.
> +	 */
> +	if (intel_gen(devid) < 10) {
> +		igt_assert_eq(0, read_slice_count(bufmgr, contexts[1],
> +						  device_slice_count));
> +	}
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, contexts[0],
> +							   device_slice_count));
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, contexts[1],
> +						    slice_count));
> +
> +	for (int i = 0; i < ARRAY_SIZE(contexts); i++)
> +		drm_intel_gem_context_destroy(contexts[i]);
> +
> +
> +
> +	/*
> +	 * Open i915/perf first and verify that all contexts created
> +	 * afterward are reconfigured to the device's default.
> +	 */
> +	perf_fd = igt_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
> +	igt_assert(perf_fd >= 0);
> +
> +	init_contexts(bufmgr, contexts, 2, device_slice_mask, pg_slice_mask);
> +
> +	/*
> +	 * Check that the device's default values are in effect,
> +	 * despite having programmed otherwise.
> +	 */
> +	if (intel_gen(devid) < 10) {
> +		igt_assert_eq(0, read_slice_count(bufmgr, contexts[1],
> +						  slice_count));
> +	}
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, contexts[0],
> +							   device_slice_count));
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, contexts[1],
> +							   device_slice_count));
> +
> +	close(perf_fd);
> +
> +	/*
> +	 * After closing the perf stream, configurations should be
> +	 * back to the programmed values.
> +	 */
> +	if (intel_gen(devid) < 10) {
> +		igt_assert_eq(0, read_slice_count(bufmgr, contexts[1],
> +						  device_slice_count));
> +	}
> +	igt_assert_eq(device_slice_count, read_slice_count(bufmgr, contexts[0],
> +							   device_slice_count));
> +	igt_assert_eq(slice_count, read_slice_count(bufmgr, contexts[1],
> +						    slice_count));
> +
> +	drm_intel_gem_context_destroy(contexts[0]);
> +	drm_intel_gem_context_destroy(contexts[1]);
> +
> +	drm_intel_bufmgr_destroy(bufmgr);
> +}
> +
> +static bool get_allow_dynamic_sseu(int fd)
> +{
> +	int sysfs;
> +	bool ret;
> +
> +	sysfs = igt_sysfs_open(fd, NULL);
> +	igt_assert_lte(0, sysfs);
> +
> +	ret = igt_sysfs_get_boolean(sysfs, "allow_dynamic_sseu");
> +
> +	close(sysfs);
> +	return ret;
> +}
> +
> +static void set_allow_dynamic_sseu(int fd, bool allowed)
> +{
> +	int sysfs;
> +
> +	sysfs = igt_sysfs_open(fd, NULL);
> +	igt_assert_lte(0, sysfs);
> +
> +	igt_assert_eq(true,
> +		      igt_sysfs_set_boolean(sysfs,
> +					    "allow_dynamic_sseu",
> +					    allowed));
> +
> +	close(sysfs);
> +}
> +
> +/*
> + * Verify that the allow_dynamic_sseu sysfs toggle controls whether an
> + * unprivileged process can change the powergating configuration.
> + */
> +static void
> +test_dynamic_sseu(bool set_allowed, bool allowed)
> +{
> +	if (set_allowed)
> +		set_allow_dynamic_sseu(drm_fd, allowed);
> +
> +	igt_fork(child, 1) {
> +		uint32_t pg_slice_mask = mask_minus(device_slice_mask, 1);
> +		struct drm_i915_gem_context_param arg;
> +		struct drm_i915_gem_context_param_sseu sseu;
> +		drm_intel_bufmgr *bufmgr;
> +		drm_intel_context *context;
> +		uint32_t context_id;
> +		int ret;
> +
> +		igt_drop_root();
> +
> +		bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 10 * 4096);
> +		igt_assert(bufmgr);
> +
> +		context = drm_intel_gem_context_create(bufmgr);
> +		igt_assert(context);
> +
> +		memset(&sseu, 0, sizeof(sseu));
> +		sseu.class = 0; /* rcs */
> +		sseu.instance = 0;
> +
> +		ret = drm_intel_gem_context_get_id(context, &context_id);
> +		igt_assert_eq(ret, 0);
> +
> +		memset(&arg, 0, sizeof(arg));
> +		arg.ctx_id = context_id;
> +		arg.param = I915_CONTEXT_PARAM_SSEU;
> +		arg.value = (uintptr_t) &sseu;
> +
> +		do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +
> +		sseu.slice_mask = pg_slice_mask;
> +
> +		if (allowed) {
> +			do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
> +		} else {
> +			do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM,
> +				     &arg, EPERM);
> +		}
> +
> +		drm_intel_gem_context_destroy(context);
> +
> +		drm_intel_bufmgr_destroy(bufmgr);
> +	}
> +
> +	igt_waitchildren();
> +}
> +
> +igt_main
> +{
> +	int i, max_slices = 3, max_subslices = 3;
> +	drm_i915_getparam_t gp;
> +
> +	igt_fixture {
> +		/* Use drm_open_driver to verify device existence */
> +		drm_fd = drm_open_driver(DRIVER_INTEL);
> +		igt_require_gem(drm_fd);
> +
> +		devid = intel_get_drm_devid(drm_fd);
> +
> +		/* Old kernel? */
> +		igt_require(kernel_has_per_context_sseu_support());
> +
> +		gp.param = I915_PARAM_SLICE_MASK;
> +		gp.value = (int *) &device_slice_mask;
> +		do_ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
> +		device_slice_count = __builtin_popcount(device_slice_mask);
> +
> +		gp.param = I915_PARAM_SUBSLICE_MASK;
> +		gp.value = (int *) &device_subslice_mask;
> +		do_ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
> +		device_subslice_count = __builtin_popcount(device_subslice_mask);
> +
> +		igt_require(!get_allow_dynamic_sseu(drm_fd));
> +	}
> +
> +	igt_subtest("default-dynamic-sseu-disallowed") {
> +		igt_require(platform_has_per_context_sseu_support());
> +		igt_require(device_slice_count > 1);
> +		test_dynamic_sseu(false, false);
> +	}
> +
> +	igt_subtest("no-sseu-support") {
> +		igt_require(!platform_has_per_context_sseu_support());
> +		test_no_sseu_support();

Is the same thing effectively being checked twice by the above two
function calls? If so, I guess it can't be avoided when using
igt_require.
We use the kernel to check if a platform supports this feature before we 
run a test. How do we know the kernel is reporting the correct platform 
support?
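
One way to sanity-check that would be to derive the expected answer
from the device generation and assert the kernel agrees (a sketch; it
assumes per-context SSEU is meant to be a Gen8+ feature, as the
"Gen < 8" comment on test_no_sseu_support implies):

	/* The kernel's report should match what the PCI ID alone
	 * predicts. */
	bool expected = intel_gen(devid) >= 8;
	igt_assert_eq(expected, platform_has_per_context_sseu_support());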

Kelvin

> +	}
> +
> +	igt_subtest("sseu-invalid-engine") {
> +		igt_require(platform_has_per_context_sseu_support());
> +		test_sseu_invalid_engine();
> +	}
> +
> +	igt_subtest("sseu-invalid-values") {
> +		igt_require(platform_has_per_context_sseu_support());
> +		test_sseu_invalid_values();
> +	}
> +
> +	for (i = 1; i < max_slices; i++) {
> +		igt_subtest_f("sseu-%i-pg-slice-program-rcs", i) {
> +			igt_require(device_slice_count > i);
> +			igt_require(platform_has_per_context_sseu_support());
> +
> +			test_sseu_slice_program_gt(i);
> +		}
> +	}
> +
> +	for (i = 1; i < max_subslices; i++) {
> +		igt_subtest_f("sseu-%i-pg-subslice-program-rcs", i) {
> +			igt_require(device_subslice_count >= 2);
> +			igt_require(platform_has_per_context_sseu_support());
> +
> +			/* Only available on some Atom platforms and Gen10+. */
> +			igt_require(IS_BROXTON(devid) || IS_GEMINILAKE(devid) ||
> +				    intel_gen(devid) >= 10);
> +
> +			test_sseu_subslice_program_gt(i);
> +		}
> +	}
> +
> +	igt_subtest("dynamic-sseu-disallow") {
> +		igt_require(platform_has_per_context_sseu_support());
> +		igt_require(device_slice_count > 1);
> +		test_dynamic_sseu(true, false);
> +	}
> +
> +	igt_subtest("dynamic-sseu-allow") {
> +		igt_require(platform_has_per_context_sseu_support());
> +		igt_require(device_slice_count > 1);
> +		test_dynamic_sseu(true, true);
> +	}
> +
> +	igt_subtest("sseu-perf") {
> +		igt_require(platform_has_per_context_sseu_support());
> +		igt_require(device_slice_count > 1);
> +		test_sseu_perf();
> +	}
> +
> +	igt_fixture {
> +		set_allow_dynamic_sseu(drm_fd, false);
> +
> +		close(drm_fd);
> +	}
> +}
> diff --git a/tests/meson.build b/tests/meson.build
> index cedb4ff1..74111554 100644
> --- a/tests/meson.build
> +++ b/tests/meson.build
> @@ -37,6 +37,7 @@ test_progs = [
>   	'gem_ctx_exec',
>   	'gem_ctx_isolation',
>   	'gem_ctx_param',
> +	'gem_ctx_sseu',
>   	'gem_ctx_switch',
>   	'gem_ctx_thrash',
>   	'gem_double_irq_loop',
> 

