[Intel-xe] [PATCH 7/7] drm/xe: Implement fdinfo memory stats printing

Jani Nikula jani.nikula at linux.intel.com
Tue Sep 12 11:14:45 UTC 2023


On Tue, 12 Sep 2023, "Ghimiray, Himal Prasad" <himal.prasad.ghimiray at intel.com> wrote:
> On 31-08-2023 14:35, Tejas Upadhyay wrote:
>> Use the newly added drm_print_memory_stats helper to show memory
>> utilisation of our objects in drm/driver-specific fdinfo output.
>>
>> To collect the stats we walk the per memory regions object lists
>> and accumulate object size into the respective drm_memory_stats
>> categories.
>>
>> Objects with multiple possible placements are reported in multiple
>> regions for total and shared sizes, while other categories are
>> counted only for the currently active region.
>>
>> Signed-off-by: Tejas Upadhyay<tejas.upadhyay at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_bo.h         |  11 +++
>>   drivers/gpu/drm/xe/xe_drm_client.c | 103 ++++++++++++++++++++++++++++-
>>   2 files changed, 113 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
>> index 0823dda0f31b..538a9ec7f4c3 100644
>> --- a/drivers/gpu/drm/xe/xe_bo.h
>> +++ b/drivers/gpu/drm/xe/xe_bo.h
>> @@ -6,6 +6,8 @@
>>   #ifndef _XE_BO_H_
>>   #define _XE_BO_H_
>>   
>> +#include <drm/ttm/ttm_tt.h>
>> +
>>   #include "xe_bo_types.h"
>>   #include "xe_macros.h"
>>   #include "xe_vm_types.h"
>> @@ -258,6 +260,15 @@ static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
>>   	return PAGE_ALIGN(bo->ttm.base.size);
>>   }
>>   
>> +static inline bool xe_bo_has_pages(struct xe_bo *bo)
>> +{
>> +	if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
>> +	    xe_bo_is_vram(bo))
>> +		return true;
>> +
>> +	return false;
>> +}
>> +
>>   void __xe_bo_release_dummy(struct kref *kref);
>>   
>>   /**
>> diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
>> index 00b28a08909f..28b4c8e608f8 100644
>> --- a/drivers/gpu/drm/xe/xe_drm_client.c
>> +++ b/drivers/gpu/drm/xe/xe_drm_client.c
>> @@ -4,10 +4,12 @@
>>    */
>>   
>>   #include <drm/drm_print.h>
>> +#include <drm/xe_drm.h>
>>   #include <linux/kernel.h>
>>   #include <linux/slab.h>
>>   #include <linux/types.h>
>>   
>> +#include "xe_bo.h"
>>   #include "xe_bo_types.h"
>>   #include "xe_device_types.h"
>>   #include "xe_drm_client.h"
>> @@ -110,6 +112,105 @@ bool xe_drm_client_remove_bo(struct xe_bo *bo)
>>   	return true;
>>   }
>>   
>> +static void bo_meminfo(struct xe_bo *bo,
>> +		       struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
>> +{
>> +	u64 sz = bo->size;
>> +	u32 mem_type;
>> +
>> +	if (bo->placement.placement)
>> +		mem_type = bo->placement.placement->mem_type;
>> +	else
>> +		mem_type = XE_PL_TT;
>> +
>> +	if (bo->ttm.base.handle_count > 1)
>> +		stats[mem_type].shared += sz;
>> +	else
>> +		stats[mem_type].private += sz;
>> +
>> +	if (xe_bo_has_pages(bo)) {
>> +		stats[mem_type].resident += sz;
>> +
>> +		if (!dma_resv_test_signaled(bo->ttm.base.resv,
>> +					    dma_resv_usage_rw(true) |
>> +					    DMA_RESV_USAGE_BOOKKEEP))
>> +			stats[mem_type].active += sz;
>> +		else if (mem_type == XE_PL_SYSTEM)
>> +			stats[mem_type].purgeable += sz;
>> +	}
>> +}
>> +
>> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
>> +{
>> +	struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
>> +	struct xe_file *xef = file->driver_priv;
>> +	struct ttm_device *bdev = &xef->xe->ttm;
>> +	struct ttm_resource_manager *man;
>> +	struct xe_drm_client *client;
>> +	struct list_head *pos;
>> +	struct xe_bo *bo;
>> +	unsigned int id;
>> +	char name[16];
>> +	u32 mem_type;
>> +	int ret = 0;
>> +
>> +	client = xef->client;
>> +
>> +	/* Public objects. */
>> +	spin_lock(&file->table_lock);
>> +	idr_for_each_entry(&file->object_idr, bo, id)
>> +		bo_meminfo(bo, stats);
>> +	spin_unlock(&file->table_lock);
>> +
>> +	/* Internal objects. */
>> +	rcu_read_lock();
>> +	list_for_each_rcu(pos, &client->bos_list) {
>> +		bo = xe_bo_get(list_entry(pos, typeof(*bo),
>> +					  client_link));
>> +		if (!bo)
>> +			continue;
>> +		bo_meminfo(bo, stats);
>> +		xe_bo_put(bo);
>> +	}
>> +	rcu_read_unlock();
>> +	for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
>
> can avoid this switch case with something like:
>
> const char *mem_type_to_name[] = {
> 	"system",	/* XE_PL_SYSTEM */
> 	"gtt",
> 	"vram0",
> 	"vram1",
> 	NULL,
> 	NULL,
> 	"stolen",
> };

Usually better to use designated initializers for this.
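For example (an untested sketch; the exact XE_PL_* indices would need
checking against xe_bo.h, and any gaps simply default to NULL):

static const char * const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
	[XE_PL_SYSTEM]	= "system",
	[XE_PL_TT]	= "gtt",
	[XE_PL_VRAM0]	= "vram0",
	[XE_PL_VRAM1]	= "vram1",
	[XE_PL_STOLEN]	= "stolen",
};

This keeps the table correct even if the XE_PL_* values are not
contiguous, which the positional initializer above silently depends on.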

>
>> +
>> +		switch (mem_type) {
>> +		case XE_PL_SYSTEM:
>> +			sprintf(name, "system");
>> +			break;
>> +		case XE_PL_TT:
>> +			sprintf(name, "gtt");
>> +			break;
>> +		case XE_PL_VRAM0:
>> +			sprintf(name, "vram%d", mem_type - XE_PL_VRAM0);
>> +			break;
>> +		case XE_PL_VRAM1:
>> +			sprintf(name, "vram%d", mem_type - XE_PL_VRAM0);
>> +			break;
>> +		case XE_PL_STOLEN:
>> +			sprintf(name, "stolen");
>> +			break;
>> +		default:
>> +			ret = -EINVAL;
>> +			break;
>> +		}
>> +
>> +		if (ret == -EINVAL)
>> +			continue;
>
> if (!mem_type_to_name[mem_type])

Usually better to add a function wrapping the access with bounds checks.
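Something like this, say (untested; the helper name is just
illustrative):

static const char *mem_type_name(u32 mem_type)
{
	if (mem_type >= ARRAY_SIZE(mem_type_to_name))
		return NULL;

	return mem_type_to_name[mem_type];
}

Then the loop can do "if (!mem_type_name(mem_type)) continue;" without
ever indexing past the end of the array.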

BR,
Jani.

>
> continue;
>
>> +
>> +		man = ttm_manager_type(bdev, mem_type);
> I assume that on a single-tile platform man will be NULL for vram1?
> Please confirm.
>> +
>> +		if (man) {
>> +			drm_print_memory_stats(p,
>> +					       &stats[mem_type],
>> +					       DRM_GEM_OBJECT_RESIDENT |
>> +					       DRM_GEM_OBJECT_PURGEABLE,
>> +					       name);
>
> instead of name can use mem_type_to_name[mem_type].
>
> Will leave it up to you to decide whether to use the switch-case or
> the const char * table.
>
> Ensure DRM_GEM_OBJECT_PURGEABLE is not reported for vram0 and vram1.
>
> BR
>
> Himal Ghimiray
>
>> +		}
>> +	}
>> +}
>> +
>>   /**
>>    * xe_drm_client_fdinfo() - Callback for fdinfo interface
>>    * @p: The drm_printer ptr
>> @@ -122,6 +223,6 @@ bool xe_drm_client_remove_bo(struct xe_bo *bo)
>>    */
>>   void xe_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
>>   {
>> -	/* show_meminfo() will be developed here */
>> +	show_meminfo(p, file);
>>   }
>>   #endif

-- 
Jani Nikula, Intel

