[RFC PATCH v3 05/11] drm, cgroup: Add peak GEM buffer allocation limit
Kenny Ho
Kenny.Ho at amd.com
Wed Jun 26 15:05:16 UTC 2019
drm.buffer.peak.stats
A read-only flat-keyed file which exists on all cgroups. Each
entry is keyed by the drm device's major:minor.
Largest GEM buffer allocated in bytes.
drm.buffer.peak.default
A read-only flat-keyed file which exists on the root cgroup.
Each entry is keyed by the drm device's major:minor.
Default limits on the largest GEM buffer allocation in bytes.
drm.buffer.peak.max
A read-write flat-keyed file which exists on all cgroups. Each
entry is keyed by the drm device's major:minor.
Per device limits on the largest GEM buffer allocation in bytes.
This is a hard limit. Attempts to allocate beyond the cgroup
limit will result in ENOMEM. Shorthand understood by memparse
(such as k, m, g) can be used.
For example, to set the largest allocation for /dev/dri/card1 to 4MB:
echo "226:1 4m" > drm.buffer.peak.max
Change-Id: I0830d56775568e1cf215b56cc892d5e7945e9f25
Signed-off-by: Kenny Ho <Kenny.Ho at amd.com>
---
include/linux/cgroup_drm.h | 3 ++
kernel/cgroup/drm.c | 61 ++++++++++++++++++++++++++++++++++++++
2 files changed, 64 insertions(+)
diff --git a/include/linux/cgroup_drm.h b/include/linux/cgroup_drm.h
index efa019666f1c..126c156ffd70 100644
--- a/include/linux/cgroup_drm.h
+++ b/include/linux/cgroup_drm.h
@@ -17,6 +17,9 @@ struct drmcgrp_device_resource {
/* for per device stats */
s64 bo_stats_total_allocated;
s64 bo_limits_total_allocated;
+
+ size_t bo_stats_peak_allocated;
+ size_t bo_limits_peak_allocated;
};
struct drmcgrp {
diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c
index cfc1fe74dca3..265008197654 100644
--- a/kernel/cgroup/drm.c
+++ b/kernel/cgroup/drm.c
@@ -19,6 +19,7 @@ struct drmcgrp_device {
struct mutex mutex;
s64 bo_limits_total_allocated_default;
+ size_t bo_limits_peak_allocated_default;
};
#define DRMCG_CTF_PRIV_SIZE 3
@@ -31,6 +32,7 @@ struct drmcgrp_device {
enum drmcgrp_res_type {
DRMCGRP_TYPE_BO_TOTAL,
+ DRMCGRP_TYPE_BO_PEAK,
};
enum drmcgrp_file_type {
@@ -78,6 +80,9 @@ static inline int init_drmcgrp_single(struct drmcgrp *drmcgrp, int minor)
if (known_drmcgrp_devs[minor] != NULL) {
ddr->bo_limits_total_allocated =
known_drmcgrp_devs[minor]->bo_limits_total_allocated_default;
+
+ ddr->bo_limits_peak_allocated =
+ known_drmcgrp_devs[minor]->bo_limits_peak_allocated_default;
}
return 0;
@@ -137,6 +142,9 @@ static inline void drmcgrp_print_stats(struct drmcgrp_device_resource *ddr,
case DRMCGRP_TYPE_BO_TOTAL:
seq_printf(sf, "%lld\n", ddr->bo_stats_total_allocated);
break;
+ case DRMCGRP_TYPE_BO_PEAK:
+ seq_printf(sf, "%zu\n", ddr->bo_stats_peak_allocated);
+ break;
default:
seq_puts(sf, "\n");
break;
@@ -155,6 +163,9 @@ static inline void drmcgrp_print_limits(struct drmcgrp_device_resource *ddr,
case DRMCGRP_TYPE_BO_TOTAL:
seq_printf(sf, "%lld\n", ddr->bo_limits_total_allocated);
break;
+ case DRMCGRP_TYPE_BO_PEAK:
+ seq_printf(sf, "%zu\n", ddr->bo_limits_peak_allocated);
+ break;
default:
seq_puts(sf, "\n");
break;
@@ -174,6 +185,10 @@ static inline void drmcgrp_print_default(struct drmcgrp_device *ddev,
seq_printf(sf, "%lld\n",
ddev->bo_limits_total_allocated_default);
break;
+ case DRMCGRP_TYPE_BO_PEAK:
+ seq_printf(sf, "%zu\n",
+ ddev->bo_limits_peak_allocated_default);
+ break;
default:
seq_puts(sf, "\n");
break;
@@ -315,6 +330,23 @@ ssize_t drmcgrp_bo_limit_write(struct kernfs_open_file *of, char *buf,
ddr->bo_limits_total_allocated = val;
break;
+ case DRMCGRP_TYPE_BO_PEAK:
+ p_max = parent == NULL ? SIZE_MAX :
+ parent->dev_resources[minor]->
+ bo_limits_peak_allocated;
+
+ rc = drmcgrp_process_limit_val(sattr, true,
+ ddev->bo_limits_peak_allocated_default,
+ p_max,
+ &val);
+
+ if (rc || val < 0) {
+ drmcgrp_pr_cft_err(drmcgrp, cft_name, minor);
+ continue;
+ }
+
+ ddr->bo_limits_peak_allocated = val;
+ break;
default:
break;
}
@@ -344,6 +376,26 @@ struct cftype files[] = {
.private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_BO_TOTAL,
DRMCGRP_FTYPE_LIMIT),
},
+ {
+ .name = "buffer.peak.stats",
+ .seq_show = drmcgrp_bo_show,
+ .private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_BO_PEAK,
+ DRMCGRP_FTYPE_STATS),
+ },
+ {
+ .name = "buffer.peak.default",
+ .seq_show = drmcgrp_bo_show,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_BO_PEAK,
+ DRMCGRP_FTYPE_DEFAULT),
+ },
+ {
+ .name = "buffer.peak.max",
+ .write = drmcgrp_bo_limit_write,
+ .seq_show = drmcgrp_bo_show,
+ .private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_BO_PEAK,
+ DRMCGRP_FTYPE_LIMIT),
+ },
{ } /* terminate */
};
@@ -365,6 +417,7 @@ int drmcgrp_register_device(struct drm_device *dev)
ddev->dev = dev;
ddev->bo_limits_total_allocated_default = S64_MAX;
+ ddev->bo_limits_peak_allocated_default = SIZE_MAX;
mutex_init(&ddev->mutex);
@@ -436,6 +489,11 @@ bool drmcgrp_bo_can_allocate(struct task_struct *task, struct drm_device *dev,
result = false;
break;
}
+
+ if (d->bo_limits_peak_allocated < size) {
+ result = false;
+ break;
+ }
}
mutex_unlock(&known_drmcgrp_devs[devIdx]->mutex);
@@ -457,6 +515,9 @@ void drmcgrp_chg_bo_alloc(struct drmcgrp *drmcgrp, struct drm_device *dev,
ddr = drmcgrp->dev_resources[devIdx];
ddr->bo_stats_total_allocated += (s64)size;
+
+ if (ddr->bo_stats_peak_allocated < (size_t)size)
+ ddr->bo_stats_peak_allocated = (size_t)size;
}
mutex_unlock(&known_drmcgrp_devs[devIdx]->mutex);
}
--
2.21.0
More information about the amd-gfx
mailing list