[Intel-xe] [PATCH 7/7] drm/xe: Implement fdinfo memory stats printing
Upadhyay, Tejas
tejas.upadhyay at intel.com
Thu Aug 31 12:19:26 UTC 2023
> -----Original Message-----
> From: Upadhyay, Tejas <tejas.upadhyay at intel.com>
> Sent: Thursday, August 31, 2023 2:36 PM
> To: intel-xe at lists.freedesktop.org
> Cc: Iddamsetty, Aravind <aravind.iddamsetty at intel.com>; Ghimiray, Himal
> Prasad <himal.prasad.ghimiray at intel.com>; Upadhyay, Tejas
> <tejas.upadhyay at intel.com>
> Subject: [PATCH 7/7] drm/xe: Implement fdinfo memory stats printing
>
> Use the newly added drm_print_memory_stats helper to show memory
> utilisation of our objects in drm/driver specific fdinfo output.
>
> To collect the stats we walk the per-memory-region object lists and
> accumulate object size into the respective drm_memory_stats categories.
>
> Objects with multiple possible placements are reported in multiple regions for
> total and shared sizes, while other categories are counted only for the
> currently active region.
This is no longer true. Initially I accounted objects in all possible placements, but now they are counted only for the current memory region. I will update the commit message in the next revision; please ignore the last paragraph.
Thanks,
Tejas
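
For context, drm_print_memory_stats() emits one "drm-<category>-<region>:" key/value pair per stats category, so with this patch the fdinfo output should gain lines roughly like the following (values are illustrative only; region names follow the switch in show_meminfo() below):

	drm-total-system:	8192 KiB
	drm-shared-system:	0
	drm-active-system:	0
	drm-resident-system:	4096 KiB
	drm-purgeable-system:	1024 KiB
	drm-total-vram0:	65536 KiB
	drm-shared-vram0:	0
	drm-active-vram0:	32768 KiB
	drm-resident-vram0:	65536 KiB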
>
> Signed-off-by: Tejas Upadhyay <tejas.upadhyay at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_bo.h         |  11 +++
>  drivers/gpu/drm/xe/xe_drm_client.c | 103 ++++++++++++++++++++++++++++-
>  2 files changed, 113 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
> index 0823dda0f31b..538a9ec7f4c3 100644
> --- a/drivers/gpu/drm/xe/xe_bo.h
> +++ b/drivers/gpu/drm/xe/xe_bo.h
> @@ -6,6 +6,8 @@
> #ifndef _XE_BO_H_
> #define _XE_BO_H_
>
> +#include <drm/ttm/ttm_tt.h>
> +
> #include "xe_bo_types.h"
> #include "xe_macros.h"
> #include "xe_vm_types.h"
> @@ -258,6 +260,15 @@ static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
> return PAGE_ALIGN(bo->ttm.base.size);
> }
>
> +static inline bool xe_bo_has_pages(struct xe_bo *bo)
> +{
> + if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
> + xe_bo_is_vram(bo))
> + return true;
> +
> + return false;
> +}
> +
> void __xe_bo_release_dummy(struct kref *kref);
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
> index 00b28a08909f..28b4c8e608f8 100644
> --- a/drivers/gpu/drm/xe/xe_drm_client.c
> +++ b/drivers/gpu/drm/xe/xe_drm_client.c
> @@ -4,10 +4,12 @@
> */
>
> #include <drm/drm_print.h>
> +#include <drm/xe_drm.h>
> #include <linux/kernel.h>
> #include <linux/slab.h>
> #include <linux/types.h>
>
> +#include "xe_bo.h"
> #include "xe_bo_types.h"
> #include "xe_device_types.h"
> #include "xe_drm_client.h"
> @@ -110,6 +112,105 @@ bool xe_drm_client_remove_bo(struct xe_bo *bo)
> return true;
> }
>
> +static void bo_meminfo(struct xe_bo *bo,
> +		       struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
> +{
> + u64 sz = bo->size;
> + u32 mem_type;
> +
> + if (bo->placement.placement)
> + mem_type = bo->placement.placement->mem_type;
> + else
> + mem_type = XE_PL_TT;
> +
> + if (bo->ttm.base.handle_count > 1)
> + stats[mem_type].shared += sz;
> + else
> + stats[mem_type].private += sz;
> +
> + if (xe_bo_has_pages(bo)) {
> + stats[mem_type].resident += sz;
> +
> + if (!dma_resv_test_signaled(bo->ttm.base.resv,
> + dma_resv_usage_rw(true) |
> + DMA_RESV_USAGE_BOOKKEEP))
> + stats[mem_type].active += sz;
> + else if (mem_type == XE_PL_SYSTEM)
> + stats[mem_type].purgeable += sz;
> + }
> +}
> +
> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
> +{
> + struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
> + struct xe_file *xef = file->driver_priv;
> + struct ttm_device *bdev = &xef->xe->ttm;
> + struct ttm_resource_manager *man;
> + struct xe_drm_client *client;
> + struct list_head *pos;
> + struct xe_bo *bo;
> + unsigned int id;
> + char name[16];
> + u32 mem_type;
> + int ret = 0;
> +
> + client = xef->client;
> +
> + /* Public objects. */
> + spin_lock(&file->table_lock);
> + idr_for_each_entry(&file->object_idr, bo, id)
> + bo_meminfo(bo, stats);
> + spin_unlock(&file->table_lock);
> +
> + /* Internal objects. */
> + rcu_read_lock();
> + list_for_each_rcu(pos, &client->bos_list) {
> + bo = xe_bo_get(list_entry(pos, typeof(*bo),
> + client_link));
> + if (!bo)
> + continue;
> + bo_meminfo(bo, stats);
> + xe_bo_put(bo);
> + }
> + rcu_read_unlock();
> +	for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES;
> +	     ++mem_type) {
> +
> + switch (mem_type) {
> + case XE_PL_SYSTEM:
> + sprintf(name, "system");
> + break;
> + case XE_PL_TT:
> + sprintf(name, "gtt");
> + break;
> + case XE_PL_VRAM0:
> + sprintf(name, "vram%d", mem_type - XE_PL_VRAM0);
> + break;
> + case XE_PL_VRAM1:
> + sprintf(name, "vram%d", mem_type - XE_PL_VRAM0);
> + break;
> + case XE_PL_STOLEN:
> + sprintf(name, "stolen");
> + break;
> + default:
> + ret = -EINVAL;
> + break;
> + }
> +
> + if (ret == -EINVAL)
> + continue;
> +
> + man = ttm_manager_type(bdev, mem_type);
> +
> + if (man) {
> + drm_print_memory_stats(p,
> + &stats[mem_type],
> + DRM_GEM_OBJECT_RESIDENT |
> + DRM_GEM_OBJECT_PURGEABLE,
> + name);
> + }
> + }
> +}
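
Side note: the XE_PL_VRAM0 and XE_PL_VRAM1 arms build the identical string, so they could be collapsed into one fall-through case, and a bounded snprintf() would also guard the fixed-size name[16] buffer. A possible sketch:

	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		snprintf(name, sizeof(name), "vram%d", mem_type - XE_PL_VRAM0);
		break;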
> +
> /**
> * xe_drm_client_fdinfo() - Callback for fdinfo interface
> * @p: The drm_printer ptr
> @@ -122,6 +223,6 @@ bool xe_drm_client_remove_bo(struct xe_bo *bo)
> */
> void xe_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
> {
> - /* show_meminfo() will be developed here */
> + show_meminfo(p, file);
> }
> #endif
> --
> 2.25.1
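
A quick way to exercise the new output from userspace is to open a render node and dump the process's own fdinfo entry for that fd; with no allocations the new keys will mostly read zero, but it confirms they are emitted. A minimal sketch, assuming the device exposes /dev/dri/renderD128 (the node name may differ per system):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char path[64], buf[4096];
		ssize_t n;
		int fdinfo;
		/* Open the xe render node; this creates the DRM client whose
		 * BOs the patch accounts. */
		int drm = open("/dev/dri/renderD128", O_RDWR);

		if (drm < 0)
			return 1;

		/* The per-fd stats live in procfs next to the fd itself. */
		snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", drm);
		fdinfo = open(path, O_RDONLY);
		if (fdinfo >= 0) {
			while ((n = read(fdinfo, buf, sizeof(buf))) > 0)
				fwrite(buf, 1, n, stdout);
			close(fdinfo);
		}

		close(drm);
		return 0;
	}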