[RFC PATCH v3 08/11] drm, cgroup: Add TTM buffer peak usage stats

Daniel Vetter daniel at ffwll.ch
Wed Jun 26 16:16:12 UTC 2019


On Wed, Jun 26, 2019 at 11:05:19AM -0400, Kenny Ho wrote:
> drm.memory.peak.stats
>         A read-only nested-keyed file which exists on all cgroups.
>         Each entry is keyed by the drm device's major:minor.  The
>         following nested keys are defined.
> 
>           ======         ==============================================
>           system         Peak host memory used
>           tt             Peak host memory used by the device (GTT/GART)
>           vram           Peak Video RAM used by the drm device
>           priv           Peak usage of other device-specific memory
>           ======         ==============================================
> 
>         Reading returns the following::
> 
>           226:0 system=0 tt=0 vram=0 priv=0
>           226:1 system=0 tt=9035776 vram=17768448 priv=16809984
>           226:2 system=0 tt=9035776 vram=17768448 priv=16809984
> 
> Change-Id: I986e44533848f66411465bdd52105e78105a709a
> Signed-off-by: Kenny Ho <Kenny.Ho at amd.com>

Same concerns as with the previous patch: there's a bit too much ttm in
here. Otherwise this looks like useful information, and it won't need
driver changes anywhere.
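
Fwiw, since the file format is documented above, a minimal userspace
sketch for consuming it could look like the below. Untested; the cgroup
path and the fixed key order are my assumptions, since the nested-keyed
format doesn't by itself guarantee ordering.

/*
 * Untested sketch: parse the nested-keyed drm.memory.peak.stats file
 * documented above.  Pass the path to the file as argv[1], e.g.
 * /sys/fs/cgroup/<group>/drm.memory.peak.stats (path is an assumption).
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned int major, minor;
	long long sys, tt, vram, priv;
	FILE *f;

	if (argc != 2)
		return 1;

	f = fopen(argv[1], "r");
	if (!f)
		return 1;

	/* Assumes the key order shown in the example output above. */
	while (fscanf(f, "%u:%u system=%lld tt=%lld vram=%lld priv=%lld",
		      &major, &minor, &sys, &tt, &vram, &priv) == 6)
		printf("drm dev %u:%u: peak vram %lld bytes, peak gtt %lld bytes\n",
		       major, minor, vram, tt);

	fclose(f);
	return 0;
}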
-Daniel

> ---
>  include/linux/cgroup_drm.h |  1 +
>  kernel/cgroup/drm.c        | 20 ++++++++++++++++++++
>  2 files changed, 21 insertions(+)
> 
> diff --git a/include/linux/cgroup_drm.h b/include/linux/cgroup_drm.h
> index 141bea06f74c..922529641df5 100644
> --- a/include/linux/cgroup_drm.h
> +++ b/include/linux/cgroup_drm.h
> @@ -25,6 +25,7 @@ struct drmcgrp_device_resource {
>  	s64			bo_stats_count_allocated;
>  
>  	s64			mem_stats[TTM_PL_PRIV+1];
> +	s64			mem_peaks[TTM_PL_PRIV+1];
>  	s64			mem_stats_evict;
>  };
>  
> diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c
> index 5aee42a628c1..5f5fa6a2b068 100644
> --- a/kernel/cgroup/drm.c
> +++ b/kernel/cgroup/drm.c
> @@ -38,6 +38,7 @@ enum drmcgrp_res_type {
>  	DRMCGRP_TYPE_BO_COUNT,
>  	DRMCGRP_TYPE_MEM,
>  	DRMCGRP_TYPE_MEM_EVICT,
> +	DRMCGRP_TYPE_MEM_PEAK,
>  };
>  
>  enum drmcgrp_file_type {
> @@ -171,6 +172,13 @@ static inline void drmcgrp_print_stats(struct drmcgrp_device_resource *ddr,
>  	case DRMCGRP_TYPE_MEM_EVICT:
>  		seq_printf(sf, "%lld\n", ddr->mem_stats_evict);
>  		break;
> +	case DRMCGRP_TYPE_MEM_PEAK:
> +		for (i = 0; i <= TTM_PL_PRIV; i++) {
> +			seq_printf(sf, "%s=%lld ", ttm_placement_names[i],
> +					ddr->mem_peaks[i]);
> +		}
> +		seq_puts(sf, "\n");
> +		break;
>  	default:
>  		seq_puts(sf, "\n");
>  		break;
> @@ -440,6 +448,12 @@ struct cftype files[] = {
>  		.private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_MEM_EVICT,
>  						DRMCGRP_FTYPE_STATS),
>  	},
> +	{
> +		.name = "memory.peak.stats",
> +		.seq_show = drmcgrp_bo_show,
> +		.private = DRMCG_CTF_PRIV(DRMCGRP_TYPE_MEM_PEAK,
> +						DRMCGRP_FTYPE_STATS),
> +	},
>  	{ }	/* terminate */
>  };
>  
> @@ -608,6 +622,8 @@ void drmcgrp_chg_mem(struct ttm_buffer_object *tbo)
>  	for ( ; drmcgrp != NULL; drmcgrp = parent_drmcgrp(drmcgrp)) {
>  		ddr = drmcgrp->dev_resources[devIdx];
>  		ddr->mem_stats[mem_type] += size;
> +		ddr->mem_peaks[mem_type] = max(ddr->mem_peaks[mem_type],
> +				ddr->mem_stats[mem_type]);
>  	}
>  	mutex_unlock(&known_drmcgrp_devs[devIdx]->mutex);
>  }
> @@ -662,6 +678,10 @@ void drmcgrp_mem_track_move(struct ttm_buffer_object *old_bo, bool evict,
>  		ddr->mem_stats[old_mem_type] -= move_in_bytes;
>  		ddr->mem_stats[new_mem_type] += move_in_bytes;
>  
> +		ddr->mem_peaks[new_mem_type] = max(
> +				ddr->mem_peaks[new_mem_type],
> +				ddr->mem_stats[new_mem_type]);
> +
>  		if (evict)
>  			ddr->mem_stats_evict++;
>  	}
> -- 
> 2.21.0
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

