[RFC PATCH v3 06/11] drm, cgroup: Add GEM buffer allocation count stats

Kenny Ho Kenny.Ho at amd.com
Wed Jun 26 15:05:17 UTC 2019


drm.buffer.count.stats
        A read-only flat-keyed file which exists on all cgroups.  Each
        entry is keyed by the drm device's major:minor.

        Total number of GEM buffers allocated.
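
        Entries follow the usual cgroup v2 flat-keyed format of one
        "<major>:<minor> <count>" pair per line.  For example, on a
        system with two DRM devices the file could read as follows
        (the device numbers and counts are purely illustrative):

                226:0 4
                226:1 9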

Change-Id: Id3e1809d5fee8562e47a7d2b961688956d844ec6
Signed-off-by: Kenny Ho <Kenny.Ho at amd.com>
---
 include/linux/cgroup_drm.h |  2 ++
 kernel/cgroup/drm.c        | 23 ++++++++++++++++++++---
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/include/linux/cgroup_drm.h b/include/linux/cgroup_drm.h
index 126c156ffd70..e4400b21ab8e 100644
--- a/include/linux/cgroup_drm.h
+++ b/include/linux/cgroup_drm.h
@@ -20,6 +20,8 @@ struct drmcgrp_device_resource {
 
 	size_t			bo_stats_peak_allocated;
 	size_t			bo_limits_peak_allocated;
+
+	s64			bo_stats_count_allocated;
 };
 
 struct drmcgrp {
diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c
index 265008197654..9144f93b851f 100644
--- a/kernel/cgroup/drm.c
+++ b/kernel/cgroup/drm.c
@@ -33,6 +33,7 @@ struct drmcgrp_device {
 enum drmcgrp_res_type {
 	DRMCGRP_TYPE_BO_TOTAL,
 	DRMCGRP_TYPE_BO_PEAK,
+	DRMCGRP_TYPE_BO_COUNT,
 };
 
 enum drmcgrp_file_type {
@@ -145,6 +146,9 @@ static inline void drmcgrp_print_stats(struct drmcgrp_device_resource *ddr,
 	case DRMCGRP_TYPE_BO_PEAK:
 		seq_printf(sf, "%zu\n", ddr->bo_stats_peak_allocated);
 		break;
+	case DRMCGRP_TYPE_BO_COUNT:
+		seq_printf(sf, "%lld\n", ddr->bo_stats_count_allocated);
+		break;
 	default:
 		seq_puts(sf, "\n");
 		break;
@@ -396,6 +400,12 @@ struct cftype files[] = {
 		.private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_BO_PEAK,
 						DRMCGRP_FTYPE_LIMIT),
 	},
+	{
+		.name = "buffer.count.stats",
+		.seq_show = drmcgrp_bo_show,
+		.private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_BO_COUNT,
+						DRMCGRP_FTYPE_STATS),
+	},
 	{ }	/* terminate */
 };
 
@@ -518,6 +528,8 @@ void drmcgrp_chg_bo_alloc(struct drmcgrp *drmcgrp, struct drm_device *dev,
 
 		if (ddr->bo_stats_peak_allocated < (size_t)size)
 			ddr->bo_stats_peak_allocated = (size_t)size;
+
+		ddr->bo_stats_count_allocated++;
 	}
 	mutex_unlock(&known_drmcgrp_devs[devIdx]->mutex);
 }
@@ -526,15 +538,20 @@ EXPORT_SYMBOL(drmcgrp_chg_bo_alloc);
 void drmcgrp_unchg_bo_alloc(struct drmcgrp *drmcgrp, struct drm_device *dev,
 		size_t size)
 {
+	struct drmcgrp_device_resource *ddr;
 	int devIdx = dev->primary->index;
 
 	if (drmcgrp == NULL || known_drmcgrp_devs[devIdx] == NULL)
 		return;
 
 	mutex_lock(&known_drmcgrp_devs[devIdx]->mutex);
-	for ( ; drmcgrp != NULL; drmcgrp = parent_drmcgrp(drmcgrp))
-		drmcgrp->dev_resources[devIdx]->bo_stats_total_allocated
-			-= (s64)size;
+	for ( ; drmcgrp != NULL; drmcgrp = parent_drmcgrp(drmcgrp)) {
+		ddr = drmcgrp->dev_resources[devIdx];
+
+		ddr->bo_stats_total_allocated -= (s64)size;
+
+		ddr->bo_stats_count_allocated--;
+	}
 	mutex_unlock(&known_drmcgrp_devs[devIdx]->mutex);
 }
 EXPORT_SYMBOL(drmcgrp_unchg_bo_alloc);
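
For readers following the series, below is a minimal, illustrative sketch of
how a driver- or core-side hook could charge and uncharge a GEM object
against these per-cgroup counters.  It is not part of the patch: how the
owning struct drmcgrp pointer is obtained is deliberately left out, since the
actual charge/uncharge call sites are introduced elsewhere in this series.

/*
 * Illustrative sketch only -- not part of this patch.  The example_*
 * functions are placeholders; only drmcgrp_chg_bo_alloc() and
 * drmcgrp_unchg_bo_alloc() come from this series.
 */
#include <linux/cgroup_drm.h>
#include <drm/drm_gem.h>

static void example_charge_gem(struct drm_gem_object *obj,
			       struct drmcgrp *drmcgrp)
{
	/*
	 * Walks from drmcgrp up to the root cgroup, updating the total
	 * size, the peak size and, with this patch, the allocation
	 * count for obj->dev at every level.
	 */
	drmcgrp_chg_bo_alloc(drmcgrp, obj->dev, obj->size);
}

static void example_uncharge_gem(struct drm_gem_object *obj,
				 struct drmcgrp *drmcgrp)
{
	/* Must mirror the charge with the same cgroup and size. */
	drmcgrp_unchg_bo_alloc(drmcgrp, obj->dev, obj->size);
}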
-- 
2.21.0


