[RFC PATCH 3/3] drm/ttm: add ttm mem trace event support
Kevin Wang
kevin1.wang at amd.com
Thu Jan 28 07:13:22 UTC 2021
Add TTM memory-related trace event support.

New trace events:
  ttm:ttm_shrink
  ttm:ttm_mem_global_reserve
  ttm:ttm_mem_global_free
Signed-off-by: Kevin Wang <kevin1.wang at amd.com>
---
drivers/gpu/drm/ttm/ttm_memory.c | 9 ++++
drivers/gpu/drm/ttm/ttm_trace.h | 70 ++++++++++++++++++++++++++++++++
2 files changed, 79 insertions(+)
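Note: ttm_memory.c below only includes ttm_trace.h; the header is expected to
be instantiated exactly once elsewhere, presumably by the earlier patches in
this series. A minimal sketch of that usual wiring, assuming a dedicated
source file (the name ttm_trace.c is only an assumption here):

    /* In exactly one compilation unit: defining CREATE_TRACE_POINTS before
     * including the trace header expands the TRACE_EVENT() declarations
     * into the real tracepoint definitions.
     */
    #define CREATE_TRACE_POINTS
    #include "ttm_trace.h"

All other files, such as ttm_memory.c in this patch, include the header as-is
and only get the trace_ttm_*() inline wrappers, which compile to static-branch
no-ops while the corresponding events are disabled.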
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index acd63b70d814..27470b1f1f13 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -39,6 +39,8 @@
#include <linux/slab.h>
#include <linux/swap.h>
+#include "ttm_trace.h"
+
#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_global ttm_mem_glob;
@@ -272,6 +274,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
int ret;
spin_lock(&glob->lock);
+ trace_ttm_shrink(from_wq, extra, ctx);
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
@@ -518,6 +521,9 @@ static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
zone->used_mem -= amount;
}
spin_unlock(&glob->lock);
+ if (single_zone)
+ trace_ttm_mem_global_free(single_zone->name, amount,
+ single_zone->used_mem, single_zone->max_mem);
}
void ttm_mem_global_free(struct ttm_mem_global *glob,
@@ -590,6 +596,9 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
ret = 0;
out_unlock:
spin_unlock(&glob->lock);
+ if (single_zone)
+ trace_ttm_mem_global_reserve(single_zone->name, amount,
+ single_zone->used_mem, single_zone->max_mem);
ttm_check_swapping(glob);
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_trace.h b/drivers/gpu/drm/ttm/ttm_trace.h
index 9f7cc34b243b..e25b8a2c423c 100644
--- a/drivers/gpu/drm/ttm/ttm_trace.h
+++ b/drivers/gpu/drm/ttm/ttm_trace.h
@@ -388,6 +388,76 @@ TRACE_EVENT(ttm_bo_vm_access,
__entry->offset, __entry->len, __entry->mem_type)
);
+TRACE_EVENT(ttm_shrink,
+ TP_PROTO(bool from_wq, uint64_t extra, struct ttm_operation_ctx *ctx),
+ TP_ARGS(from_wq, extra, ctx),
+ TP_STRUCT__entry(
+ __field(bool, from_wq)
+ __field(bool, interruptible)
+ __field(bool, wait_gpu)
+ __field(uint64_t, extra)
+ ),
+
+ TP_fast_assign(
+ __entry->from_wq = from_wq;
+ __entry->extra = extra;
+ __entry->interruptible = ctx->interruptible;
+ __entry->wait_gpu = !ctx->no_wait_gpu;
+ ),
+
+ TP_printk("ttm_shrink: from_wq=%s, interruptible=%s, wait_gpu=%s, extra=0x%llx(%lld)",
+ __entry->from_wq ? "true" : "false",
+ __entry->interruptible ? "true" : "false",
+ __entry->wait_gpu ? "true" : "false",
+ __entry->extra, __entry->extra)
+);
+
+TRACE_EVENT(ttm_mem_global_reserve,
+ TP_PROTO(const char *zone_name, uint64_t amount,
+ uint64_t used_mem, uint64_t max_mem),
+ TP_ARGS(zone_name, amount, used_mem, max_mem),
+ TP_STRUCT__entry(
+ __string(zone, zone_name)
+ __field(uint64_t, amount)
+ __field(uint64_t, used_mem)
+ __field(uint64_t, max_mem)
+ ),
+
+ TP_fast_assign(
+ __assign_str(zone, zone_name);
+ __entry->amount = amount;
+ __entry->used_mem = used_mem;
+ __entry->max_mem = max_mem;
+ ),
+
+ TP_printk("zone:%s, amount=%lld, used=%lld/%lld",
+ __get_str(zone), __entry->amount,
+ __entry->used_mem, __entry->max_mem)
+);
+
+TRACE_EVENT(ttm_mem_global_free,
+ TP_PROTO(const char *zone_name, uint64_t amount,
+ uint64_t used_mem, uint64_t max_mem),
+ TP_ARGS(zone_name, amount, used_mem, max_mem),
+ TP_STRUCT__entry(
+ __string(zone, zone_name)
+ __field(uint64_t, amount)
+ __field(uint64_t, used_mem)
+ __field(uint64_t, max_mem)
+ ),
+
+ TP_fast_assign(
+ __assign_str(zone, zone_name);
+ __entry->amount = amount;
+ __entry->used_mem = used_mem;
+ __entry->max_mem = max_mem;
+ ),
+
+ TP_printk("zone:%s, amount=%lld, used=%lld/%lld",
+ __get_str(zone), __entry->amount,
+ __entry->used_mem, __entry->max_mem)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
--
2.17.1