[PATCH RFC v4 10/16] drm, cgroup: Add TTM buffer peak usage stats
Kenny Ho
Kenny.Ho at amd.com
Thu Aug 29 06:05:27 UTC 2019
drm.memory.peaks.stats
A read-only nested-keyed file which exists on all cgroups.
Each entry is keyed by the drm device's major:minor. The
following nested keys are defined.
====== ==============================================
system Peak host memory used by the drm device
tt Peak host memory used by the device (GTT/GART)
vram Peak Video RAM used by the drm device
priv Other drm device specific memory peak usage
====== ==============================================
Reading returns the following::
226:0 system=0 tt=0 vram=0 priv=0
226:1 system=0 tt=9035776 vram=17768448 priv=16809984
226:2 system=0 tt=9035776 vram=17768448 priv=16809984
Change-Id: I986e44533848f66411465bdd52105e78105a709a
Signed-off-by: Kenny Ho <Kenny.Ho at amd.com>
---
include/linux/cgroup_drm.h | 2 ++
kernel/cgroup/drm.c | 19 +++++++++++++++++++
2 files changed, 21 insertions(+)
diff --git a/include/linux/cgroup_drm.h b/include/linux/cgroup_drm.h
index 4c2794c9333d..9579e2a0b71d 100644
--- a/include/linux/cgroup_drm.h
+++ b/include/linux/cgroup_drm.h
@@ -20,6 +20,7 @@ enum drmcg_res_type {
DRMCG_TYPE_BO_COUNT,
DRMCG_TYPE_MEM,
DRMCG_TYPE_MEM_EVICT,
+ DRMCG_TYPE_MEM_PEAK,
__DRMCG_TYPE_LAST,
};
@@ -37,6 +38,7 @@ struct drmcg_device_resource {
s64 bo_stats_count_allocated;
s64 mem_stats[TTM_PL_PRIV+1];
+ s64 mem_peaks[TTM_PL_PRIV+1];
s64 mem_stats_evict;
};
diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c
index 4960a8d1e8f4..899dc44722c3 100644
--- a/kernel/cgroup/drm.c
+++ b/kernel/cgroup/drm.c
@@ -162,6 +162,13 @@ static void drmcg_print_stats(struct drmcg_device_resource *ddr,
case DRMCG_TYPE_MEM_EVICT:
seq_printf(sf, "%lld\n", ddr->mem_stats_evict);
break;
+ case DRMCG_TYPE_MEM_PEAK:
+ for (i = 0; i <= TTM_PL_PRIV; i++) {
+ seq_printf(sf, "%s=%lld ", ttm_placement_names[i],
+ ddr->mem_peaks[i]);
+ }
+ seq_puts(sf, "\n");
+ break;
default:
seq_puts(sf, "\n");
break;
@@ -443,6 +450,12 @@ struct cftype files[] = {
.private = DRMCG_CTF_PRIV(DRMCG_TYPE_MEM_EVICT,
DRMCG_FTYPE_STATS),
},
+ {
+ .name = "memory.peaks.stats",
+ .seq_show = drmcg_seq_show,
+ .private = DRMCG_CTF_PRIV(DRMCG_TYPE_MEM_PEAK,
+ DRMCG_FTYPE_STATS),
+ },
{ } /* terminate */
};
@@ -617,6 +630,8 @@ void drmcg_chg_mem(struct ttm_buffer_object *tbo)
for ( ; drmcg != NULL; drmcg = drmcg_parent(drmcg)) {
ddr = drmcg->dev_resources[devIdx];
ddr->mem_stats[mem_type] += size;
+ ddr->mem_peaks[mem_type] = max(ddr->mem_peaks[mem_type],
+ ddr->mem_stats[mem_type]);
}
mutex_unlock(&dev->drmcg_mutex);
}
@@ -668,6 +683,10 @@ void drmcg_mem_track_move(struct ttm_buffer_object *old_bo, bool evict,
ddr->mem_stats[old_mem_type] -= move_in_bytes;
ddr->mem_stats[new_mem_type] += move_in_bytes;
+ ddr->mem_peaks[new_mem_type] = max(
+ ddr->mem_peaks[new_mem_type],
+ ddr->mem_stats[new_mem_type]);
+
if (evict)
ddr->mem_stats_evict++;
}
--
2.22.0
More information about the amd-gfx
mailing list