[PATCH i-g-t 2/2] tests/intel/xe_oa: Tests for OA syncs
Umesh Nerlige Ramappa
umesh.nerlige.ramappa at intel.com
Fri Oct 25 19:35:35 UTC 2024
On Fri, Oct 25, 2024 at 11:52:05AM -0700, Ashutosh Dixit wrote:
>Verify OA syncs signal correctly in open and change config code
>paths. Verify with different types of sync objects as well as by both
>waiting and skipping the wait for syncs to signal.
>
>v2: Significantly expand oa syncs testing as described above
Are we only enabling output syncs for the OA use case, i.e. the user can
wait for the OA configuration to complete, but cannot control when the
OA config starts? That matches the earlier behavior, so it should be
fine either way.
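
For reference, a minimal sketch of the usage model I'm assuming, pieced
together from the helpers in this patch (uses the test's drm_fd, test_set,
__perf_open() and __ff(); error handling omitted):

	/*
	 * Output-sync model: the sync passed via DRM_XE_OA_PROPERTY_SYNCS is
	 * signaled by the kernel once the OA configuration has been applied.
	 * Userspace can wait on it, but cannot use it to defer when the
	 * configuration starts.
	 */
	struct drm_xe_sync sync = {
		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
		.handle = syncobj_create(drm_fd, 0),
	};
	uint64_t properties[] = {
		DRM_XE_OA_PROPERTY_OA_UNIT_ID, 0,
		DRM_XE_OA_PROPERTY_SAMPLE_OA, true,
		DRM_XE_OA_PROPERTY_OA_METRIC_SET, test_set->perf_oa_metrics_set,
		DRM_XE_OA_PROPERTY_OA_FORMAT, __ff(test_set->perf_oa_format),
		DRM_XE_OA_PROPERTY_NUM_SYNCS, 1,
		DRM_XE_OA_PROPERTY_SYNCS, to_user_pointer(&sync),
	};
	struct intel_xe_oa_open_prop param = {
		.num_properties = ARRAY_SIZE(properties) / 2,
		.properties_ptr = to_user_pointer(properties),
	};
	int stream_fd = __perf_open(drm_fd, &param, false);

	/* Wait for the kernel to signal that the OA config is in place */
	igt_assert(syncobj_wait(drm_fd, &sync.handle, 1, INT64_MAX, 0, NULL));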
LGTM,
Reviewed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa at intel.com>
>
>Signed-off-by: Ashutosh Dixit <ashutosh.dixit at intel.com>
>---
> lib/xe/xe_oa.c | 6 +-
> lib/xe/xe_oa.h | 2 +
> tests/intel/xe_oa.c | 261 ++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 266 insertions(+), 3 deletions(-)
>
>diff --git a/lib/xe/xe_oa.c b/lib/xe/xe_oa.c
>index 4fdd0b8c95..c168669c5d 100644
>--- a/lib/xe/xe_oa.c
>+++ b/lib/xe/xe_oa.c
>@@ -1025,8 +1025,8 @@ const char *intel_xe_perf_read_report_reason(const struct intel_xe_perf *perf,
> return "unknown";
> }
>
>-static void xe_oa_prop_to_ext(struct intel_xe_oa_open_prop *properties,
>- struct drm_xe_ext_set_property *extn)
>+void intel_xe_oa_prop_to_ext(struct intel_xe_oa_open_prop *properties,
>+ struct drm_xe_ext_set_property *extn)
> {
> __u64 *prop = from_user_pointer(properties->properties_ptr);
> struct drm_xe_ext_set_property *ext = extn;
>@@ -1063,7 +1063,7 @@ int intel_xe_perf_ioctl(int fd, enum drm_xe_observation_op op, void *arg)
> struct intel_xe_oa_open_prop *oprop = (struct intel_xe_oa_open_prop *)arg;
>
> igt_assert_lte(oprop->num_properties, XE_OA_MAX_SET_PROPERTIES);
>- xe_oa_prop_to_ext(oprop, ext);
>+ intel_xe_oa_prop_to_ext(oprop, ext);
> }
>
> return igt_ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &p);
>diff --git a/lib/xe/xe_oa.h b/lib/xe/xe_oa.h
>index 962f9dddcc..7d3d074560 100644
>--- a/lib/xe/xe_oa.h
>+++ b/lib/xe/xe_oa.h
>@@ -398,6 +398,8 @@ uint64_t intel_xe_perf_read_record_timestamp_raw(const struct intel_xe_perf *per
> const char *intel_xe_perf_read_report_reason(const struct intel_xe_perf *perf,
> const struct intel_xe_perf_record_header *record);
>
>+void intel_xe_oa_prop_to_ext(struct intel_xe_oa_open_prop *properties,
>+ struct drm_xe_ext_set_property *extn);
> int intel_xe_perf_ioctl(int fd, enum drm_xe_observation_op op, void *arg);
> void intel_xe_perf_ioctl_err(int fd, enum drm_xe_observation_op op, void *arg, int err);
>
>diff --git a/tests/intel/xe_oa.c b/tests/intel/xe_oa.c
>index 92f5828c79..27f30ebe73 100644
>--- a/tests/intel/xe_oa.c
>+++ b/tests/intel/xe_oa.c
>@@ -22,6 +22,7 @@
> #include "drm.h"
> #include "igt.h"
> #include "igt_device.h"
>+#include "igt_syncobj.h"
> #include "igt_sysfs.h"
> #include "xe/xe_ioctl.h"
> #include "xe/xe_query.h"
>@@ -4463,6 +4464,231 @@ static void test_mapped_oa_buffer(map_oa_buffer_test_t test_with_fd_open,
> __perf_close(stream_fd);
> }
>
>+
>+/* Return alternative config_id if available, else just return config_id */
>+static void find_alt_oa_config(u32 config_id, u32 *alt_config_id)
>+{
>+ struct dirent *entry;
>+ int metrics_fd, dir_fd;
>+ DIR *metrics_dir;
>+ bool ret;
>+
>+ metrics_fd = openat(sysfs, "metrics", O_DIRECTORY);
>+ igt_assert_lte(0, metrics_fd);
>+
>+ metrics_dir = fdopendir(metrics_fd);
>+ igt_assert(metrics_dir);
>+
>+ while ((entry = readdir(metrics_dir))) {
>+ if (entry->d_type != DT_DIR)
>+ continue;
>+
>+ dir_fd = openat(metrics_fd, entry->d_name, O_RDONLY);
>+ ret = __igt_sysfs_get_u32(dir_fd, "id", alt_config_id);
>+ close(dir_fd);
>+ if (!ret)
>+ continue;
>+
>+ if (config_id != *alt_config_id)
>+ goto exit;
>+ }
>+
>+ *alt_config_id = config_id;
>+exit:
>+ closedir(metrics_dir);
>+}
>+
>+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
>+
>+#define WAIT (0x1 << 0)
>+#define CONFIG (0x1 << 1)
>+
>+enum oa_sync_type {
>+ OA_SYNC_TYPE_SYNCOBJ,
>+ OA_SYNC_TYPE_USERPTR,
>+ OA_SYNC_TYPE_UFENCE,
>+};
>+
>+struct oa_sync {
>+ enum oa_sync_type sync_type;
>+ u32 syncobj;
>+ u32 vm;
>+ u32 bo;
>+ size_t bo_size;
>+ struct {
>+ uint64_t vm_sync;
>+ uint64_t pad;
>+ uint64_t oa_sync;
>+ } *data;
>+};
>+
>+static void
>+oa_sync_init(enum oa_sync_type sync_type, const struct drm_xe_engine_class_instance *hwe,
>+ struct oa_sync *osync, struct drm_xe_sync *sync)
>+{
>+ uint64_t addr = 0x1a0000;
>+
>+ osync->sync_type = sync_type;
>+ sync->flags = DRM_XE_SYNC_FLAG_SIGNAL;
>+
>+ switch (osync->sync_type) {
>+ case OA_SYNC_TYPE_SYNCOBJ:
>+ osync->syncobj = syncobj_create(drm_fd, 0);
>+ sync->handle = osync->syncobj;
>+ sync->type = DRM_XE_SYNC_TYPE_SYNCOBJ;
>+ break;
>+ case OA_SYNC_TYPE_USERPTR:
>+ case OA_SYNC_TYPE_UFENCE:
>+ sync->type = DRM_XE_SYNC_TYPE_USER_FENCE;
>+ sync->timeline_value = USER_FENCE_VALUE;
>+
>+ osync->vm = xe_vm_create(drm_fd, 0, 0);
>+ osync->bo_size = xe_bb_size(drm_fd, sizeof(*osync->data));
>+ if (osync->sync_type == OA_SYNC_TYPE_USERPTR) {
>+ osync->data = aligned_alloc(xe_get_default_alignment(drm_fd),
>+ osync->bo_size);
>+ igt_assert(osync->data);
>+ } else {
>+ osync->bo = xe_bo_create(drm_fd, osync->vm, osync->bo_size,
>+ vram_if_possible(drm_fd, hwe->gt_id),
>+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
>+ osync->data = xe_bo_map(drm_fd, osync->bo, osync->bo_size);
>+ }
>+ memset(osync->data, 0, osync->bo_size);
>+
>+ sync->addr = to_user_pointer(&osync->data[0].vm_sync);
>+ if (osync->bo)
>+ xe_vm_bind_async(drm_fd, osync->vm, 0, osync->bo, 0,
>+ addr, osync->bo_size, sync, 1);
>+ else
>+ xe_vm_bind_userptr_async(drm_fd, osync->vm, 0,
>+ to_user_pointer(osync->data),
>+ addr, osync->bo_size, sync, 1);
>+ xe_wait_ufence(drm_fd, &osync->data[0].vm_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
>+
>+ sync->addr = to_user_pointer(&osync->data[0].oa_sync);
>+ break;
>+ default:
>+ igt_assert(false);
>+ }
>+}
>+
>+static void oa_sync_wait(struct oa_sync *osync)
>+{
>+ switch (osync->sync_type) {
>+ case OA_SYNC_TYPE_SYNCOBJ:
>+ igt_assert(syncobj_wait(drm_fd, &osync->syncobj, 1, INT64_MAX, 0, NULL));
>+ syncobj_reset(drm_fd, &osync->syncobj, 1);
>+ break;
>+ case OA_SYNC_TYPE_USERPTR:
>+ case OA_SYNC_TYPE_UFENCE:
>+ xe_wait_ufence(drm_fd, &osync->data[0].oa_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
>+ osync->data[0].oa_sync = 0;
>+ break;
>+ default:
>+ igt_assert(false);
>+ }
>+}
>+
>+static void oa_sync_free(struct oa_sync *osync)
>+{
>+ switch (osync->sync_type) {
>+ case OA_SYNC_TYPE_SYNCOBJ:
>+ syncobj_destroy(drm_fd, osync->syncobj);
>+ break;
>+ case OA_SYNC_TYPE_USERPTR:
>+ case OA_SYNC_TYPE_UFENCE:
>+ if (osync->bo) {
>+ munmap(osync->data, osync->bo_size);
>+ gem_close(drm_fd, osync->bo);
>+ } else {
>+ free(osync->data);
>+ }
>+ xe_vm_destroy(drm_fd, osync->vm);
>+ break;
>+ default:
>+ igt_assert(false);
>+ }
>+}
>+
>+/**
>+ * SUBTEST: syncs-%s-%s
>+ *
>+ * Description: Test OA syncs (with %arg[1] sync types and %arg[2] wait and
>+ * reconfig flags) signal correctly in open and reconfig code
>+ * paths
>+ *
>+ * arg[1]:
>+ *
>+ * @syncobj: sync type syncobj
>+ * @userptr: sync type userptr
>+ * @ufence: sync type ufence
>+ *
>+ * arg[2]:
>+ *
>+ * @wait-cfg: Exercise reconfig path and wait for syncs to signal
>+ * @wait: Don't exercise reconfig path and wait for syncs to signal
>+ * @cfg: Exercise reconfig path but don't wait for syncs to signal
>+ * @none: Don't exercise reconfig path and don't wait for syncs to signal
>+ */
>+static void test_syncs(const struct drm_xe_engine_class_instance *hwe,
>+ enum oa_sync_type sync_type, int flags)
>+{
>+ struct drm_xe_ext_set_property extn[XE_OA_MAX_SET_PROPERTIES] = {};
>+ struct intel_xe_perf_metric_set *test_set = metric_set(hwe);
>+ struct drm_xe_sync sync = {};
>+ struct oa_sync osync = {};
>+ uint64_t open_properties[] = {
>+ DRM_XE_OA_PROPERTY_OA_UNIT_ID, 0,
>+ DRM_XE_OA_PROPERTY_SAMPLE_OA, true,
>+ DRM_XE_OA_PROPERTY_OA_METRIC_SET, test_set->perf_oa_metrics_set,
>+ DRM_XE_OA_PROPERTY_OA_FORMAT, __ff(test_set->perf_oa_format),
>+ DRM_XE_OA_PROPERTY_NUM_SYNCS, 1,
>+ DRM_XE_OA_PROPERTY_SYNCS, to_user_pointer(&sync),
>+ };
>+ struct intel_xe_oa_open_prop open_param = {
>+ .num_properties = ARRAY_SIZE(open_properties) / 2,
>+ .properties_ptr = to_user_pointer(open_properties),
>+ };
>+ uint64_t config_properties[] = {
>+ DRM_XE_OA_PROPERTY_OA_METRIC_SET, 0, /* Filled later */
>+ DRM_XE_OA_PROPERTY_NUM_SYNCS, 1,
>+ DRM_XE_OA_PROPERTY_SYNCS, to_user_pointer(&sync),
>+ };
>+ struct intel_xe_oa_open_prop config_param = {
>+ .num_properties = ARRAY_SIZE(config_properties) / 2,
>+ .properties_ptr = to_user_pointer(config_properties),
>+ };
>+ uint32_t alt_config_id;
>+ int ret;
>+
>+ oa_sync_init(sync_type, hwe, &osync, &sync);
>+
>+ stream_fd = __perf_open(drm_fd, &open_param, false);
>+
>+ /* Reset the sync object if we are going to reconfig the stream */
>+ if (flags & (WAIT | CONFIG))
>+ oa_sync_wait(&osync);
>+
>+ if (!(flags & CONFIG))
>+ goto exit;
>+
>+ /* Change stream configuration */
>+ find_alt_oa_config(test_set->perf_oa_metrics_set, &alt_config_id);
>+
>+ config_properties[1] = alt_config_id;
>+ intel_xe_oa_prop_to_ext(&config_param, extn);
>+
>+ ret = igt_ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_CONFIG, extn);
>+ igt_assert_eq(ret, test_set->perf_oa_metrics_set);
>+
>+ if (flags & WAIT)
>+ oa_sync_wait(&osync);
>+exit:
>+ __perf_close(stream_fd);
>+ oa_sync_free(&osync);
>+}
>+
> static const char *xe_engine_class_name(uint32_t engine_class)
> {
> switch (engine_class) {
>@@ -4511,6 +4737,25 @@ static const char *xe_engine_class_name(uint32_t engine_class)
>
> igt_main
> {
>+ const struct sync_section {
>+ const char *name;
>+ enum oa_sync_type sync_type;
>+ unsigned int flags;
>+ } sync_sections[] = {
>+ { "syncobj-wait-cfg", OA_SYNC_TYPE_SYNCOBJ, WAIT | CONFIG},
>+ { "syncobj-wait", OA_SYNC_TYPE_SYNCOBJ, WAIT },
>+ { "syncobj-cfg", OA_SYNC_TYPE_SYNCOBJ, CONFIG },
>+ { "syncobj-none", OA_SYNC_TYPE_SYNCOBJ, 0 },
>+ { "userptr-wait-cfg", OA_SYNC_TYPE_USERPTR, WAIT | CONFIG},
>+ { "userptr-wait", OA_SYNC_TYPE_USERPTR, WAIT },
>+ { "userptr-cfg", OA_SYNC_TYPE_USERPTR, CONFIG },
>+ { "userptr-none", OA_SYNC_TYPE_USERPTR, 0 },
>+ { "ufence-wait-cfg", OA_SYNC_TYPE_UFENCE, WAIT | CONFIG},
>+ { "ufence-wait", OA_SYNC_TYPE_UFENCE, WAIT },
>+ { "ufence-cfg", OA_SYNC_TYPE_UFENCE, CONFIG },
>+ { "ufence-none", OA_SYNC_TYPE_UFENCE, 0 },
>+ { NULL },
>+ };
> struct drm_xe_engine_class_instance *hwe = NULL;
> struct xe_device *xe_dev;
>
>@@ -4713,6 +4958,22 @@ igt_main
> }
> }
>
>+ igt_subtest_group {
>+ igt_fixture {
>+ struct drm_xe_query_oa_units *qoa = xe_oa_units(drm_fd);
>+ struct drm_xe_oa_unit *oau = (struct drm_xe_oa_unit *)&qoa->oa_units[0];
>+
>+ igt_require(oau->capabilities & DRM_XE_OA_CAPS_SYNCS);
>+ }
>+
>+ for (const struct sync_section *s = sync_sections; s->name; s++) {
>+ igt_subtest_with_dynamic_f("syncs-%s", s->name)
>+ __for_one_render_engine(hwe)
>+ test_syncs(hwe, s->sync_type, s->flags);
>+
>+ }
>+ }
>+
> igt_fixture {
> /* leave sysctl options in their default state... */
> write_u64_file("/proc/sys/dev/xe/observation_paranoid", 1);
>--
>2.41.0
>