[PATCH i-g-t] tests/xe_sriov_scheduling: pf-priority subtest

Lukasz Laguna lukasz.laguna at intel.com
Thu Mar 6 07:36:39 UTC 2025


Add a subtest to verify that the PF, when configured with HIGH priority,
gets scheduling priority over VFs running the same workload.

The verification is done by measuring the time the PF takes to complete
the workload. This duration should match the workload time, as the PF
should not be preempted by the VFs until it finishes its work.
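For illustration only (not part of the patch): with num_vfs VFs plus the
PF submitting the same preemptible workload under equal execution quanta,
a fairly time-sliced client needs roughly duration * (num_vfs + 1) of wall
time, while a PF boosted to HIGH priority should finish in about the bare
duration. The subtest checks this with check_within_epsilon() against the
configured outlier threshold; a minimal sketch of that criterion, with a
hypothetical helper name, could look like:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper, sketching the pass criterion only. */
static bool pf_ran_unpreempted(uint64_t elapsed_ms, uint64_t duration_ms,
			       double threshold)
{
	/* e.g. threshold == 0.1 accepts a +-10% deviation */
	return elapsed_ms >= duration_ms * (1.0 - threshold) &&
	       elapsed_ms <= duration_ms * (1.0 + threshold);
}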

Signed-off-by: Lukasz Laguna <lukasz.laguna at intel.com>
---
 tests/intel/xe_sriov_scheduling.c | 111 ++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)

diff --git a/tests/intel/xe_sriov_scheduling.c b/tests/intel/xe_sriov_scheduling.c
index 89cdd8750..60d55688c 100644
--- a/tests/intel/xe_sriov_scheduling.c
+++ b/tests/intel/xe_sriov_scheduling.c
@@ -714,6 +714,105 @@ static void nonpreempt_engine_resets(int pf_fd, int num_vfs,
 	igt_sriov_disable_vfs(pf_fd);
 }
 
+/**
+ * SUBTEST: pf-priority
+ * Description:
+ *   Check that PF gets scheduling priority over other VFs running
+ *   the same workload when configured with HIGH priority.
+ */
+static void pf_priority(int pf_fd, unsigned int num_vfs, const struct subm_opts *opts)
+{
+	struct subm_set set_ = {}, *set = &set_;
+	uint8_t vf_ids[num_vfs + 1 /*PF*/];
+	uint32_t job_timeout_ms = sysfs_get_job_timeout_ms(pf_fd, &xe_engine(pf_fd, 0)->instance);
+	struct vf_sched_params vf_sched_params = prepare_vf_sched_params(num_vfs + 1, 1,
+									 job_timeout_ms, opts);
+	struct job_sched_params job_sched_params;
+	unsigned int gt;
+
+	job_sched_params.sched_params = vf_sched_params;
+	job_sched_params.duration_ms = 1000;
+	job_sched_params.num_repeats = 1;
+
+	igt_info("eq=%ums pt=%uus duration=%ums repeats=%d num_vfs=%d job_timeout=%ums\n",
+		 job_sched_params.sched_params.exec_quantum_ms,
+		 job_sched_params.sched_params.preempt_timeout_us,
+		 job_sched_params.duration_ms, job_sched_params.num_repeats,
+		 num_vfs + 1, job_timeout_ms);
+
+	init_vf_ids(vf_ids, ARRAY_SIZE(vf_ids),
+		    &(struct init_vf_ids_opts){ .shuffle = true,
+						.shuffle_pf = true });
+	xe_sriov_require_default_scheduling_attributes(pf_fd);
+	/* enable VFs */
+	igt_sriov_disable_driver_autoprobe(pf_fd);
+	igt_sriov_enable_vfs(pf_fd, num_vfs);
+	/* set scheduling params (PF and VFs) */
+	set_vfs_scheduling_params(pf_fd, num_vfs, &job_sched_params.sched_params);
+	/* probe VF */
+	igt_sriov_enable_driver_autoprobe(pf_fd);
+	for (int vf = 1; vf <= num_vfs; ++vf)
+		igt_sriov_bind_vf_drm_driver(pf_fd, vf);
+	/* configure PF with high scheduling priority */
+	xe_for_each_gt(pf_fd, gt)
+		xe_sriov_set_sched_priority(pf_fd, 0, gt, XE_SRIOV_SCHED_PRIORITY_HIGH);
+
+	/* init subm_set */
+	subm_set_alloc_data(set, num_vfs + 1 /*PF*/);
+	subm_set_init_sync_method(set, opts->sync_method);
+
+	for (int n = 0; n < set->ndata; ++n) {
+		int vf_fd =
+			vf_ids[n] ?
+				igt_sriov_open_vf_drm_device(pf_fd, vf_ids[n]) :
+				drm_reopen_driver(pf_fd);
+
+		igt_assert_fd(vf_fd);
+		set->data[n].opts = opts;
+		subm_init(&set->data[n].subm, vf_fd, vf_ids[n], 0,
+			  xe_engine(vf_fd, 0)->instance);
+		subm_workload_init(&set->data[n].subm,
+				   &(struct subm_work_desc){
+					.duration_ms = job_sched_params.duration_ms,
+					.preempt = true,
+					.repeats = job_sched_params.num_repeats });
+		igt_stats_init_with_size(&set->data[n].stats.samples,
+					 set->data[n].subm.work.repeats);
+		if (set->sync_method == SYNC_BARRIER)
+			set->data[n].barrier = &set->barrier;
+	}
+
+	/* dispatch spinners, wait for results */
+	subm_set_dispatch_and_wait_threads(set);
+
+	/* verify results */
+	for (int n = 0; n < set->ndata; ++n) {
+		struct subm_stats *stats = &set->data[n].stats;
+		uint64_t elapsed_ms = (stats->end_timestamp - stats->start_timestamp) /
+				      NSEC_PER_MSEC;
+		uint64_t expected_ms = set->data[n].subm.vf_num ?
+				       job_sched_params.duration_ms * (1 + num_vfs) :
+				       job_sched_params.duration_ms;
+
+		igt_debug("[%s] elapsed_ms: %lu, expected_ms: %lu\n",
+			  set->data[n].subm.id, elapsed_ms, expected_ms);
+
+		if (set->data[n].subm.vf_num)
+			continue;
+
+		igt_assert_f(check_within_epsilon(elapsed_ms, expected_ms, opts->outlier_treshold),
+			     "[%s] workload duration: %lu ms not within +-%.0f%% of expected=%lu ms\n",
+			     set->data[n].subm.id, elapsed_ms, opts->outlier_treshold * 100, expected_ms);
+	}
+
+	/* cleanup */
+	subm_set_fini(set);
+	set_vfs_scheduling_params(pf_fd, num_vfs, &(struct vf_sched_params){});
+	xe_for_each_gt(pf_fd, gt)
+		xe_sriov_set_sched_priority(pf_fd, 0, gt, XE_SRIOV_SCHED_PRIORITY_LOW);
+	igt_sriov_disable_vfs(pf_fd);
+}
+
 static struct subm_opts subm_opts = {
 	.sync_method = SYNC_BARRIER,
 	.outlier_treshold = 0.1,
@@ -804,6 +903,18 @@ igt_main_args("", long_opts, help_str, subm_opts_handler, NULL)
 				nonpreempt_engine_resets(pf_fd, vf, &subm_opts);
 	}
 
+	igt_describe("Check PF gets priority over other VFs when configured with HIGH priority");
+	igt_subtest_with_dynamic("pf-priority") {
+		if (extended_scope)
+			for_each_sriov_num_vfs(pf_fd, vf)
+				igt_dynamic_f("numvfs-%d", vf)
+					pf_priority(pf_fd, vf, &subm_opts);
+
+		for_random_sriov_vf(pf_fd, vf)
+			igt_dynamic("numvfs-random")
+				pf_priority(pf_fd, vf, &subm_opts);
+	}
+
 	igt_fixture {
 		set_vfs_scheduling_params(pf_fd, igt_sriov_get_total_vfs(pf_fd),
 					  &(struct vf_sched_params){});
-- 
2.40.0


