[RFC v4 02/16] drm/sched: Add some more scheduling quality unit tests
Tvrtko Ursulin
tvrtko.ursulin at igalia.com
Fri Apr 25 10:20:20 UTC 2025
This time round we explore the rate at which submitted job queues are
processed when multiple identical clients run in parallel.
Example test output:
3 clients:
t cycle: min avg max : ...
+ 0ms 0 0 0 : 0 0 0
+ 102ms 2 2 2 : 2 2 2
+ 208ms 5 6 6 : 6 5 5
+ 310ms 8 9 9 : 9 9 8
...
+ 2616ms 82 83 83 : 83 83 82
+ 2717ms 83 83 83 : 83 83 83
avg_max_min_delta(x100)=60
Every 100ms for the duration of the test, the test logs how many
submission cycles each client has completed, prefixed by the minimum,
average and maximum across all clients. When finished, the overall
average delta between max and min is logged as a rough indicator of
scheduling fairness.
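As an illustration of the metric (numbers chosen for the example, not
taken from the log above): ten samples with max-min deltas of
0, 1, 1, 1, 1, 1, 1, 0, 0 and 0 give DIV_ROUND_UP(6 * 100, 10) = 60,
i.e. the fastest client was on average 0.6 cycles ahead of the slowest.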
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at igalia.com>
Cc: Christian König <christian.koenig at amd.com>
Cc: Danilo Krummrich <dakr at kernel.org>
Cc: Matthew Brost <matthew.brost at intel.com>
Cc: Philipp Stanner <phasta at kernel.org>
Cc: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer at amd.com>
---
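(Hint, not part of the patch: one way to run just this suite with the
KUnit wrapper, assuming the scheduler tests directory carries a
.kunitconfig like the DRM core tests do, would be something along the
lines of:

  ./tools/testing/kunit/kunit.py run \
      --kunitconfig=drivers/gpu/drm/scheduler/tests \
      'drm_sched_scheduler_many_clients_tests'

plus whatever extra Kconfig or --arch options the tree requires.)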
.../gpu/drm/scheduler/tests/tests_scheduler.c | 186 +++++++++++++++++-
1 file changed, 185 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/scheduler/tests/tests_scheduler.c b/drivers/gpu/drm/scheduler/tests/tests_scheduler.c
index b66321ef7abe..d70b47d7bf7a 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_scheduler.c
@@ -181,6 +181,7 @@ struct drm_sched_client_params {
struct drm_sched_test_params {
const char *description;
+ unsigned int num_clients;
struct drm_sched_client_params client[2];
};
@@ -626,6 +627,189 @@ static struct kunit_suite drm_sched_scheduler_two_clients2 = {
.test_cases = drm_sched_scheduler_two_clients_tests,
};
+
+static const struct drm_sched_test_params drm_sched_many_cases[] = {
+ {
+ .description = "2 clients",
+ .num_clients = 2,
+ .client[0] = {
+ .priority = DRM_SCHED_PRIORITY_NORMAL,
+ .job_cnt = 4,
+ .job_us = 1000,
+ .wait_us = 0,
+ .sync = true,
+ },
+ },
+ {
+ .description = "3 clients",
+ .num_clients = 3,
+ .client[0] = {
+ .priority = DRM_SCHED_PRIORITY_NORMAL,
+ .job_cnt = 4,
+ .job_us = 1000,
+ .wait_us = 0,
+ .sync = true,
+ },
+ },
+ {
+ .description = "7 clients",
+ .num_clients = 7,
+ .client[0] = {
+ .priority = DRM_SCHED_PRIORITY_NORMAL,
+ .job_cnt = 4,
+ .job_us = 1000,
+ .wait_us = 0,
+ .sync = true,
+ },
+ },
+ {
+ .description = "13 clients",
+ .num_clients = 13,
+ .client[0] = {
+ .priority = DRM_SCHED_PRIORITY_NORMAL,
+ .job_cnt = 4,
+ .job_us = 1000,
+ .wait_us = 0,
+ .sync = true,
+ },
+ },
+ {
+ .description = "31 clients",
+ .num_clients = 31,
+ .client[0] = {
+ .priority = DRM_SCHED_PRIORITY_NORMAL,
+ .job_cnt = 2,
+ .job_us = 1000,
+ .wait_us = 0,
+ .sync = true,
+ },
+ },
+};
+
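+ /*
+ * KUNIT_ARRAY_PARAM() generates drm_sched_scheduler_many_clients_gen_params(),
+ * which the parameterised test case below uses to walk the table above.
+ */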
+KUNIT_ARRAY_PARAM(drm_sched_scheduler_many_clients,
+ drm_sched_many_cases,
+ drm_sched_desc);
+
+static void drm_sched_scheduler_many_clients_test(struct kunit *test)
+{
+ const struct drm_sched_test_params *params = test->param_value;
+ struct drm_mock_scheduler *sched = test->priv;
+ const unsigned int clients = params->num_clients;
+ unsigned int i, j, delta_total = 0, loops = 0;
+ struct test_client *client;
+ unsigned int *prev_cycle;
+ ktime_t start;
+ char *buf;
+
+ /*
+ * Many clients with deep-ish async queues.
+ */
+
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ client = kunit_kcalloc(test, clients, sizeof(*client), GFP_KERNEL);
+ prev_cycle = kunit_kcalloc(test, clients, sizeof(*prev_cycle),
+ GFP_KERNEL);
+
+ for (i = 0; i < clients; i++)
+ client[i].entity =
+ drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < clients; i++) {
+ client[i].test = test;
+ client[i].id = i;
+ client[i].params = params->client[0];
+ client[i].duration = ms_to_ktime(1000 / clients);
+ client[i].cycle_time.min_us = ~0UL;
+ client[i].latency_time.min_us = ~0UL;
+ client[i].worker =
+ kthread_create_worker(0, "%s-%u", __func__, i);
+ if (IS_ERR(client[i].worker)) {
+ for (j = 0; j < i; j++)
+ kthread_destroy_worker(client[j].worker);
+ KUNIT_FAIL(test, "Failed to create worker!\n");
+ return;
+ }
+
+ kthread_init_work(&client[i].work, drm_sched_client_work);
+ }
+
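+ /* Queue work only once all workers exist so the clients start together. */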
+ for (i = 0; i < clients; i++)
+ kthread_queue_work(client[i].worker, &client[i].work);
+
+ start = ktime_get();
+ pr_info("%u clients:\n\tt\t\tcycle:\t min avg max : ...\n", clients);
+ for (;;) {
+ unsigned int min = ~0;
+ unsigned int max = 0;
+ unsigned int total = 0;
+ bool done = true;
+ char pbuf[16];
+
+ memset(buf, 0, PAGE_SIZE);
+ for (i = 0; i < clients; i++) {
+ unsigned int cycle, cycles;
+
+ cycle = READ_ONCE(client[i].cycle);
+ cycles = READ_ONCE(client[i].cycles);
+
+ snprintf(pbuf, sizeof(pbuf), " %3u", cycle);
+ strlcat(buf, pbuf, PAGE_SIZE);
+
+ total += cycle;
+ if (cycle < min)
+ min = cycle;
+ if (cycle > max)
+ max = cycle;
+
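+ /*
+ * Run until every client has made some progress and has entered
+ * its final cycle.
+ */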
+ if (!min || (cycle + 1) < cycles)
+ done = false;
+ }
+
+ loops++;
+ delta_total += max - min;
+
+ pr_info("\t+%6lldms\t\t %3u %3u %3u :%s\n",
+ ktime_to_ms(ktime_sub(ktime_get(), start)),
+ min, DIV_ROUND_UP(total, clients), max, buf);
+
+ if (done)
+ break;
+
+ msleep(100);
+ }
+
+ pr_info(" avg_max_min_delta(x100)=%u\n",
+ loops ? DIV_ROUND_UP(delta_total * 100, loops) : 0);
+
+ for (i = 0; i < clients; i++) {
+ kthread_flush_work(&client[i].work);
+ kthread_destroy_worker(client[i].worker);
+ }
+
+ for (i = 0; i < clients; i++)
+ drm_mock_sched_entity_free(client[i].entity);
+}
+
+static const struct kunit_attributes drm_sched_scheduler_many_clients_attr = {
+ .speed = KUNIT_SPEED_SLOW,
+};
+
+static struct kunit_case drm_sched_scheduler_many_clients_tests[] = {
+ KUNIT_CASE_PARAM_ATTR(drm_sched_scheduler_many_clients_test,
+ drm_sched_scheduler_many_clients_gen_params,
+ drm_sched_scheduler_many_clients_attr),
+ {}
+};
+
+static struct kunit_suite drm_sched_scheduler_many_clients = {
+ .name = "drm_sched_scheduler_many_clients_tests",
+ .init = drm_sched_scheduler_init2,
+ .exit = drm_sched_scheduler_exit,
+ .test_cases = drm_sched_scheduler_many_clients_tests,
+};
+
kunit_test_suites(&drm_sched_scheduler_overhead,
&drm_sched_scheduler_two_clients1,
- &drm_sched_scheduler_two_clients2);
+ &drm_sched_scheduler_two_clients2,
+ &drm_sched_scheduler_many_clients);
--
2.48.0