<div dir="ltr"><div dir="ltr"> </div><div dir="ltr"><div dir="ltr"><br></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Fri, Mar 1, 2024 at 10:54 AM Rob Clark <<a href="mailto:robdclark@gmail.com" target="_blank">robdclark@gmail.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">From: Rob Clark <<a href="mailto:robdclark@chromium.org" target="_blank">robdclark@chromium.org</a>><br>
>
> Perfetto can use these traces to track global and per-process GPU memory
> usage.
>
> Signed-off-by: Rob Clark <robdclark@chromium.org>
> ---
> I realized the tracepoint that perfetto uses to show GPU memory usage
> globally and per-process was already upstream, but with no users.
>
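For anyone who has not looked at it, the tracepoint in question is the one in
include/trace/events/gpu_mem.h. Paraphrasing from memory (not a verbatim copy
of the header), it looks roughly like:

TRACE_EVENT(gpu_mem_total,
	/*
	 * pid == 0 reports the global (per-GPU) total; a non-zero pid
	 * reports the total charged to that process.  size is the new
	 * running total in bytes, not a delta.
	 */
	TP_PROTO(uint32_t gpu_id, uint32_t pid, uint64_t size),
	TP_ARGS(gpu_id, pid, size),
	/* TP_STRUCT__entry / TP_fast_assign / TP_printk elided */
);

That matches how the msm code below uses it: gpu_id is hard-coded to 0 (there
is only one GPU), and pid is either 0 for the device-wide total or
pid_nr(file->pid) for the per-process total.
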
> This overlaps a bit with fdinfo, but ftrace is a lighter-weight
> mechanism and fits better with perfetto (plus it is already supported in
> trace_processor and the perfetto UI, whereas something fdinfo-based would
> require new code to be added to perfetto).
>
> We could probably do this more globally (i.e. in drm_gem_get/put_pages() and
> drm_gem_handle_create_tail()/drm_gem_object_release_handle()) if folks
> prefer. Not sure where that leaves the TTM drivers.
>
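To make the "do it in drm core" option concrete, here is a rough sketch of one
way it could look (the member names gem_mem_total / gem_mem_ctx are made up,
nothing like this exists in drm_gem.c today):

/* in drivers/gpu/drm/drm_gem.c, with #include <trace/events/gpu_mem.h> */

static void drm_gem_update_dev_mem(struct drm_device *dev, ssize_t delta)
{
	/* dev->gem_mem_total would be a new atomic64_t in struct drm_device */
	u64 total = atomic64_add_return(delta, &dev->gem_mem_total);

	/* minor index as a stand-in for a stable per-device gpu_id */
	trace_gpu_mem_total(dev->primary->index, 0, total);
}

static void drm_gem_update_file_mem(struct drm_file *file, ssize_t delta)
{
	/* file->gem_mem_ctx would be a new atomic64_t in struct drm_file */
	u64 total = atomic64_add_return(delta, &file->gem_mem_ctx);

	rcu_read_lock(); /* for file->pid, same idea as the msm code below */
	trace_gpu_mem_total(file->minor->dev->primary->index,
			    pid_nr(rcu_dereference(file->pid)), total);
	rcu_read_unlock();
}

with drm_gem_update_dev_mem() called from drm_gem_get_pages()/
drm_gem_put_pages(), and drm_gem_update_file_mem() from
drm_gem_handle_create_tail()/drm_gem_object_release_handle(). TTM drivers
allocate their backing storage through TTM rather than drm_gem_get_pages(), so
they would presumably still need their own hooks.
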
>  drivers/gpu/drm/msm/Kconfig   |  1 +
>  drivers/gpu/drm/msm/msm_drv.h |  5 +++++
>  drivers/gpu/drm/msm/msm_gem.c | 37 +++++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/msm/msm_gpu.h |  8 ++++++++
>  4 files changed, 51 insertions(+)
>
> diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
> index f202f26adab2..e4c912fcaf22 100644
> --- a/drivers/gpu/drm/msm/Kconfig
> +++ b/drivers/gpu/drm/msm/Kconfig
> @@ -33,6 +33,7 @@ config DRM_MSM
>  	select PM_OPP
>  	select NVMEM
>  	select PM_GENERIC_DOMAINS
> +	select TRACE_GPU_MEM
>  	help
>  	  DRM/KMS driver for MSM/snapdragon.
>
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index 16a7cbc0b7dd..cb8f7e804b5b 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -137,6 +137,11 @@ struct msm_drm_private {
>  	struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
>  	struct msm_perf_state *perf;
>
> +	/**
> +	 * total_mem: Total/global amount of memory backing GEM objects.
> +	 */
> +	atomic64_t total_mem;
> +
>  	/**
>  	 * List of all GEM objects (mainly for debugfs, protected by obj_lock
>  	 * (acquire before per GEM object lock)
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 175ee4ab8a6f..e04c4af5d154 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -12,6 +12,9 @@
>  #include <linux/pfn_t.h>
>
>  #include <drm/drm_prime.h>
> +#include <drm/drm_file.h>
> +
> +#include <trace/events/gpu_mem.h>
>
>  #include "msm_drv.h"
>  #include "msm_fence.h"
> @@ -33,6 +36,34 @@ static bool use_pages(struct drm_gem_object *obj)
>  	return !msm_obj->vram_node;
>  }
>
> +static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
> +{
> +	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
> +	trace_gpu_mem_total(0, 0, total_mem);
> +}
> +
> +static void update_ctx_mem(struct drm_file *file, ssize_t size)
> +{
> +	struct msm_file_private *ctx = file->driver_priv;
> +	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
> +
> +	rcu_read_lock(); /* Locks file->pid! */
> +	trace_gpu_mem_total(0, pid_nr(file->pid), ctx_mem);
> +	rcu_read_unlock();
> +
> +}
> +
> +static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
> +{
> +	update_ctx_mem(file, obj->size);
> +	return 0;
> +}
> +
> +static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
> +{
> +	update_ctx_mem(file, -obj->size);
> +}
> +
>  /*
>   * Cache sync.. this is a bit over-complicated, to fit dma-mapping
>   * API. Really GPU cache is out of scope here (handled on cmdstream)
> @@ -156,6 +187,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
>  			return p;
>  		}
>
> +		update_device_mem(dev->dev_private, obj->size);
> +
>  		msm_obj->pages = p;
>
>  		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
> @@ -209,6 +242,8 @@ static void put_pages(struct drm_gem_object *obj)
>  			msm_obj->sgt = NULL;
>  		}
>
> +		update_device_mem(obj->dev->dev_private, -obj->size);
> +
>  		if (use_pages(obj))
>  			drm_gem_put_pages(obj, msm_obj->pages, true, false);
>  		else
> @@ -1118,6 +1153,8 @@ static const struct vm_operations_struct vm_ops = {
>
>  static const struct drm_gem_object_funcs msm_gem_object_funcs = {
>  	.free = msm_gem_free_object,
> +	.open = msm_gem_open,
> +	.close = msm_gem_close,
>  	.pin = msm_gem_prime_pin,
>  	.unpin = msm_gem_prime_unpin,
>  	.get_sg_table = msm_gem_prime_get_sg_table,
> diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
> index 2bfcb222e353..f7d2a7d6f8cc 100644
> --- a/drivers/gpu/drm/msm/msm_gpu.h
> +++ b/drivers/gpu/drm/msm/msm_gpu.h
> @@ -428,6 +428,14 @@ struct msm_file_private {
>  	 * level.
>  	 */
>  	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
> +
> +	/**
> +	 * ctx_mem:
> +	 *
> +	 * Total amount of memory of GEM buffers with handles attached for
> +	 * this context.
> +	 */
> +	atomic64_t ctx_mem;
>  };

Just for added context, past discussions on TRACE_GPU_MEM:

https://lists.freedesktop.org/archives/dri-devel/2021-October/328260.html
https://lists.freedesktop.org/archives/dri-devel/2021-January/295120.html

Some have even suggested deleting the tracepoint altogether.

Personally, I think we should land an internal user in a non-breaking way,
since userspace (Perfetto) already depends on it. As it is, we have been in
limbo for multiple years ...

>
>  /**
> -- 
> 2.44.0