[Intel-gfx] [PATCH v2 4/4] drm/i915: Predictive governor to control eu/slice/subslice

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Tue Nov 6 10:34:18 UTC 2018


On 06/11/2018 04:13, Ankit Navik wrote:
> From: Praveen Diwakar <praveen.diwakar at intel.com>
> 
> High resoluton timer is used for predictive governor to control

s/resoluton/resolution/

> eu/slice/subslice based on workloads.
> 
> Debugfs is provided to enable/disable/update timer configuration

The per-version changelog (what changed since v1) is missing.

> 
> Signed-off-by: Praveen Diwakar <praveen.diwakar at intel.com>
> Signed-off-by: Yogesh Marathe <yogesh.marathe at intel.com>
> Signed-off-by: Aravindan Muthukumar <aravindan.muthukumar at intel.com>
> Signed-off-by: Kedar J Karanje <kedar.j.karanje at intel.com>
> Signed-off-by: Ankit Navik <ankit.p.navik at intel.com>
> Suggested-by: Tvrtko Ursulin <tvrtko.ursulin at linux.intel.com>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c | 88 ++++++++++++++++++++++++++++++++++++-
>   drivers/gpu/drm/i915/i915_drv.h     |  3 ++
>   2 files changed, 90 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index f9ce35d..0f368f6 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -4740,6 +4740,90 @@ static const struct drm_info_list i915_debugfs_list[] = {
>   	{"i915_drrs_status", i915_drrs_status, 0},
>   	{"i915_rps_boost_info", i915_rps_boost_info, 0},
>   };
> +
> +#define PENDING_REQ_0	0	/* No active request pending */
> +
> +/*
> + * Anything above threshold is considered as HIGH load, less is considered
> + * as LOW load and equal is considered as MEDIAUM load.

s/MEDIAUM/MEDIUM/

> + *
> + * The threshold value of three active requests pending.
> + */
> +#define PENDING_REQ_3	3

Is there a better name for these than number suffixes? Like maybe 
PENDING_THRESHOLD_MEDIUM or something?
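
Something like this, purely as a naming suggestion:

	#define PENDING_THRESHOLD_NONE		0	/* no active requests pending */
	#define PENDING_THRESHOLD_MEDIUM	3	/* above: HIGH, equal: MEDIUM, below: LOW */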

> +
> +static int predictive_load_enable;
> +
> +static enum hrtimer_restart predictive_load_cb(struct hrtimer *hrtimer)
> +{
> +	struct drm_i915_private *dev_priv =
> +			container_of(hrtimer, typeof(*dev_priv), pred_timer);
> +	struct i915_gem_context *ctx;
> +	atomic_t req_pending;
> +
> +	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
> +
> +		if (!ctx->name)
> +			continue;

Put a comment here saying why you are doing this.
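
For instance, assuming the intent is to skip kernel contexts (which have no
name), something like:

	/* Skip kernel contexts; they carry no client submissions. */
	if (!ctx->name)
		continue;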

> +
> +		mutex_lock(&dev_priv->pred_mutex);

You can't sleep in an hrtimer callback. I think I said this last time as
well. But you also don't need the mutex just for an atomic_read.

> +		atomic_set(&req_pending, atomic_read(&ctx->req_cnt));

req_pending does not need to be atomic_t.
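
A plain local variable, read once and without the mutex, is enough here.
Sketch:

	unsigned int req_pending = atomic_read(&ctx->req_cnt);

	if (req_pending == PENDING_REQ_0)
		continue;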

> +		mutex_unlock(&dev_priv->pred_mutex);
> +
> +		if (atomic_read(&req_pending) == PENDING_REQ_0)
> +			continue;

Is it by design that a context with zero pending requests never 
transitions to the LOW state? If so, this needs a comment.

> +
> +		if (atomic_read(&req_pending) > PENDING_REQ_3)
> +			ctx->load_type = LOAD_TYPE_HIGH;
> +		else if (atomic_read(&req_pending) == PENDING_REQ_3)
> +			ctx->load_type = LOAD_TYPE_MEDIUM;
> +		else
> +			ctx->load_type = LOAD_TYPE_LOW;
> +
> +		i915_gem_context_set_load_type(ctx, ctx->load_type);

You seem to be using ctx->load_type just as temporary storage here, which 
is not right.

Also, I think the usual pattern would be to set the pending type in 
i915_gem_context_set_load_type, which is then consumed (turned into 
load_type) in lower level code.
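
Roughly like this, as a sketch (pending_load_type is an illustrative field
name and gem_load_type an assumed enum name; neither exists yet):

	/* In the timer callback: */
	i915_gem_context_set_load_type(ctx, load_type);

	/* In i915_gem_context.c: */
	void i915_gem_context_set_load_type(struct i915_gem_context *ctx,
					    enum gem_load_type type)
	{
		/* Latched into ctx->load_type by the lower level code. */
		ctx->pending_load_type = type;
	}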

> +	}
> +
> +	hrtimer_forward_now(hrtimer,
> +			ms_to_ktime(predictive_load_enable));
> +
> +	return HRTIMER_RESTART;
> +}
> +
> +static int
> +i915_predictive_load_get(void *data, u64 *val)
> +{
> +	*val = predictive_load_enable;
> +	return 0;
> +}
> +
> +static int
> +i915_predictive_load_set(void *data, u64 val)
> +{
> +	struct drm_i915_private *dev_priv = data;
> +
> +	predictive_load_enable = val;
> +
> +	if (predictive_load_enable) {

This is still unsafe with respect to racing set() calls on the debugfs file.
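
One way to close it, as a sketch (reusing pred_mutex purely for
illustration; any lock serialising the writers would do, and it assumes the
timer is initialised at driver init as suggested below):

	static int
	i915_predictive_load_set(void *data, u64 val)
	{
		struct drm_i915_private *dev_priv = data;

		mutex_lock(&dev_priv->pred_mutex);

		predictive_load_enable = val;

		if (val)
			hrtimer_start(&dev_priv->pred_timer,
				      ms_to_ktime(val),
				      HRTIMER_MODE_REL_PINNED);
		else
			hrtimer_cancel(&dev_priv->pred_timer);

		mutex_unlock(&dev_priv->pred_mutex);

		return 0;
	}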

> +		if (!dev_priv->predictive_load_timer_init) {
> +			hrtimer_init(&dev_priv->pred_timer, CLOCK_MONOTONIC,
> +					HRTIMER_MODE_REL);
> +			dev_priv->pred_timer.function = predictive_load_cb;
> +			dev_priv->predictive_load_timer_init = 1;

Timer init should be in dev_priv init code.
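
For example in the dev_priv setup path (sketch; i915_driver_init_early is
just one possible home for it):

	hrtimer_init(&dev_priv->pred_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev_priv->pred_timer.function = predictive_load_cb;

With that, the predictive_load_timer_init flag can go away entirely.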

> +		}
> +
> +		hrtimer_start(&dev_priv->pred_timer,
> +			ms_to_ktime(predictive_load_enable),
> +			HRTIMER_MODE_REL_PINNED);

I think we talked about giving some slack to the timer.
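
hrtimer_start_range_ns() is one way to do that. Sketch, with an arbitrary
1 ms window:

	/* Let the expiry be coalesced anywhere within a 1 ms window. */
	hrtimer_start_range_ns(&dev_priv->pred_timer,
			       ms_to_ktime(predictive_load_enable),
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL_PINNED);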

> +	} else {
> +		hrtimer_cancel(&dev_priv->pred_timer);
> +	}
> +
> +	return 0;
> +}
> +
> +DEFINE_SIMPLE_ATTRIBUTE(i915_predictive_load_ctl,
> +			i915_predictive_load_get, i915_predictive_load_set,
> +			"%llu\n");
> +
>   #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
>   
>   static const struct i915_debugfs_files {
> @@ -4769,7 +4853,9 @@ static const struct i915_debugfs_files {
>   	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
>   	{"i915_ipc_status", &i915_ipc_status_fops},
>   	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
> -	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
> +	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
> +	/* FIXME: Move to sysfs once the feature becomes real */
> +	{"i915_predictive_load_ctl", &i915_predictive_load_ctl}
>   };
>   
>   int i915_debugfs_register(struct drm_i915_private *dev_priv)
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f1b16d0..72ddd63 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1686,7 +1686,10 @@ struct drm_i915_private {
>   	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
>   	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
>   
> +	/* optimal slice/subslice/EU configuration state */
>   	struct optimum_config opt_config[LOAD_TYPE_MAX];
> +	struct hrtimer pred_timer;
> +	int predictive_load_timer_init;
>   
>   	unsigned int fsb_freq, mem_freq, is_ddr3;
>   	unsigned int skl_preferred_vco_freq;
> 

Regards,

Tvrtko

