[RFC 19/23] cgroup/drm: Add scheduling weight callback

Tvrtko Ursulin tvrtko.ursulin at igalia.com
Fri May 2 12:32:52 UTC 2025


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Add a new callback via which the drm cgroup controller will notify
clients of their scheduling weight.

At the same time, in order to reduce the amount of tracking needed for
drivers which will not support any control from the drm cgroup
controller, let's make the functionality opt-in and use the presence of
drm_cgroup_ops as the activation criterion.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at igalia.com>
---
 include/drm/drm_drv.h | 26 ++++++++++++++++++++++++++
 kernel/cgroup/drm.c   | 27 +++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index a43d707b5f36..4267ed1e63f1 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -169,6 +169,22 @@ enum drm_driver_feature {
 	DRIVER_HAVE_IRQ			= BIT(30),
 };
 
+/**
+ * struct drm_cgroup_ops - driver callbacks for DRM cgroup controller support
+ *
+ * This structure contains callbacks that drivers can provide if they are able
+ * to support the functionality implemented by the DRM cgroup controller.
+ */
+struct drm_cgroup_ops {
+	/**
+	 * @notify_weight:
+	 *
+	 * Optional callback used by the DRM cgroup controller to notify
+	 * clients of their scheduling weight.
+	 */
+	void (*notify_weight)(struct drm_file *file_priv, unsigned int weight);
+};
+
 /**
  * struct drm_driver - DRM driver structure
  *
@@ -431,6 +447,16 @@ struct drm_driver {
 	 * some examples.
 	 */
 	const struct file_operations *fops;
+
+#ifdef CONFIG_CGROUP_DRM
+	/**
+	 * @cg_ops:
+	 *
+	 * Optional pointer to driver callbacks facilitating integration with
+	 * the DRM cgroup controller.
+	 */
+	const struct drm_cgroup_ops *cg_ops;
+#endif
 };
 
 void *__devm_drm_dev_alloc(struct device *parent,
diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c
index e9dc1e7cc4a4..ea7655edf86a 100644
--- a/kernel/cgroup/drm.c
+++ b/kernel/cgroup/drm.c
@@ -7,6 +7,8 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 
+#include <drm/drm_drv.h>
+
 struct drm_cgroup_state {
 	struct cgroup_subsys_state css;
 
@@ -29,6 +31,22 @@ css_to_drmcs(struct cgroup_subsys_state *css)
 	return container_of(css, struct drm_cgroup_state, css);
 }
 
+static void __maybe_unused
+drmcs_notify_weight(struct drm_cgroup_state *drmcs)
+{
+	struct drm_file *fpriv;
+
+	lockdep_assert_held(&drmcg_mutex);
+
+	list_for_each_entry(fpriv, &drmcs->clients, clink) {
+		const struct drm_cgroup_ops *cg_ops =
+			fpriv->minor->dev->driver->cg_ops;
+
+		if (cg_ops && cg_ops->notify_weight)
+			cg_ops->notify_weight(fpriv, 0);
+	}
+}
+
 static void drmcs_free(struct cgroup_subsys_state *css)
 {
 	struct drm_cgroup_state *drmcs = css_to_drmcs(css);
@@ -59,6 +77,9 @@ void drmcgroup_client_open(struct drm_file *file_priv)
 {
 	struct drm_cgroup_state *drmcs;
 
+	if (!file_priv->minor->dev->driver->cg_ops)
+		return;
+
 	drmcs = css_to_drmcs(task_get_css(current, drm_cgrp_id));
 
 	mutex_lock(&drmcg_mutex);
@@ -74,6 +95,9 @@ void drmcgroup_client_close(struct drm_file *file_priv)
 
 	drmcs = css_to_drmcs(file_priv->__css);
 
+	if (!file_priv->minor->dev->driver->cg_ops)
+		return;
+
 	mutex_lock(&drmcg_mutex);
 	list_del(&file_priv->clink);
 	file_priv->__css = NULL;
@@ -88,6 +112,9 @@ void drmcgroup_client_migrate(struct drm_file *file_priv)
 	struct drm_cgroup_state *src, *dst;
 	struct cgroup_subsys_state *old;
 
+	if (!file_priv->minor->dev->driver->cg_ops)
+		return;
+
 	mutex_lock(&drmcg_mutex);
 
 	old = file_priv->__css;
-- 
2.48.0


