[Intel-gfx] [PATCH v2 21/21] drm/i915: power domains: add vlv power wells

Jesse Barnes jbarnes at virtuousgeek.org
Thu Mar 6 21:29:24 CET 2014


On Tue,  4 Mar 2014 19:23:10 +0200
Imre Deak <imre.deak at intel.com> wrote:

> Based on an early draft from Jesse.
> 
> Add support for powering on/off the dynamic power wells on VLV by
> registering its display and dpio dynamic power wells with the power
> domain framework.
> 
> For now power on all PHY TX lanes regardless of the actual lane
> configuration. This can be optimized later, once the PHY side setup
> enables only the required lanes.
> 
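Minor aside for anyone following along: the framework resolves a domain
request to wells with a simple mask test against each well's .domains,
roughly like the sketch below (handwritten, the actual helper names in
intel_pm.c differ):

	static void power_domain_get(struct drm_i915_private *dev_priv,
				     enum intel_display_power_domain domain)
	{
		struct i915_power_domains *power_domains =
			&dev_priv->power_domains;
		struct i915_power_well *power_well;
		int i;

		for (i = 0; i < power_domains->power_well_count; i++) {
			power_well = &power_domains->power_wells[i];

			/* a well participates iff its mask has the domain */
			if (!(power_well->domains & BIT(domain)))
				continue;

			/* first reference powers the well on via its ops */
			if (!power_well->count++)
				power_well->ops->enable(dev_priv, power_well);
		}
	}

Since every TX well in this patch lists every TX domain, any lane
request pulls in all four TX wells, which is how the all-lanes behavior
above falls out of the table.
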
> v2:
> - undef function local COND macro after its last use (Ville)
> - Take dev_priv->irq_lock around the whole sequence of
>   intel_set_cpu_fifo_underrun_reporting_nolock() and
>   valleyview_disable_display_irqs(). They are short and releasing
>   the lock in between only makes proving correctness more difficult.
> - sanitize local var names in vlv_power_well_enabled()
> 
> Signed-off-by: Imre Deak <imre.deak at intel.com>
> ---
>  drivers/gpu/drm/i915/i915_dma.c      |   1 -
>  drivers/gpu/drm/i915/i915_drv.h      |   2 +-
>  drivers/gpu/drm/i915/intel_display.c |   1 +
>  drivers/gpu/drm/i915/intel_drv.h     |   2 +
>  drivers/gpu/drm/i915/intel_pm.c      | 237 +++++++++++++++++++++++++++++++++++
>  5 files changed, 241 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
> index dca4dc3..f8f7a59 100644
> --- a/drivers/gpu/drm/i915/i915_dma.c
> +++ b/drivers/gpu/drm/i915/i915_dma.c
> @@ -1668,7 +1668,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
>  		goto out_mtrrfree;
>  	}
>  
> -	dev_priv->display_irqs_enabled = true;
>  	intel_irq_init(dev);
>  	intel_uncore_sanitize(dev);
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index fea0216..3fad6ed 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1054,7 +1054,7 @@ struct i915_power_well {
>  	/* power well enable/disable usage count */
>  	int count;
>  	unsigned long domains;
> -	void *data;
> +	unsigned long data;
>  	const struct i915_power_well_ops *ops;
>  };
>  
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 6fb40cb..b2eb07a 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -4224,6 +4224,7 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
>  
>  	if (req_cdclk != cur_cdclk)
>  		valleyview_set_cdclk(dev, req_cdclk);
> +	modeset_update_power_wells(dev);
>  }
>  
>  static void valleyview_crtc_enable(struct drm_crtc *crtc)
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index afc01a4..f2555ac 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -609,6 +609,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
>  /* i915_irq.c */
>  bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
>  					   enum pipe pipe, bool enable);
> +bool intel_set_cpu_fifo_underrun_reporting_nolock(struct drm_device *dev,
> +						  enum pipe pipe, bool enable);
>  bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
>  					   enum transcoder pch_transcoder,
>  					   bool enable);
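
The _nolock variant is presumably just the existing helper split in two
so that callers can hold irq_lock across the whole sequence (per the v2
note above); i.e. the locked wrapper would reduce to something like:

	bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						   enum pipe pipe, bool enable)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		unsigned long flags;
		bool ret;

		spin_lock_irqsave(&dev_priv->irq_lock, flags);
		ret = intel_set_cpu_fifo_underrun_reporting_nolock(dev, pipe,
								   enable);
		spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

		return ret;
	}
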
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 39acffd..b8b5e3e 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -5356,6 +5356,141 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
>  	return true;
>  }
>  
> +static void vlv_set_power_well(struct drm_i915_private *dev_priv,
> +			       struct i915_power_well *power_well, bool enable)
> +{
> +	enum punit_power_well power_well_id = power_well->data;
> +	u32 mask;
> +	u32 state;
> +	u32 ctrl;
> +
> +	mask = PUNIT_PWRGT_MASK(power_well_id);
> +	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
> +			 PUNIT_PWRGT_PWR_GATE(power_well_id);
> +
> +	mutex_lock(&dev_priv->rps.hw_lock);
> +
> +#define COND \
> +	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
> +
> +	if (COND)
> +		goto out;
> +
> +	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
> +	ctrl &= ~mask;
> +	ctrl |= state;
> +	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
> +
> +	if (wait_for(COND, 100))
> +		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
> +			  state,
> +			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
> +
> +#undef COND
> +
> +out:
> +	mutex_unlock(&dev_priv->rps.hw_lock);
> +}
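
For reference, the Punit encoding this relies on is two bits per well
in PUNIT_REG_PWRGT_CTRL/STATUS; quoting the i915_reg.h definitions from
memory:

	#define PUNIT_PWRGT_MASK(power_well)	 (3 << ((power_well) * 2))
	#define PUNIT_PWRGT_PWR_ON(power_well)	 (0 << ((power_well) * 2))
	#define PUNIT_PWRGT_PWR_GATE(power_well) (1 << ((power_well) * 2))

so COND compares only the two status bits belonging to this well, and
the read-modify-write of the ctrl register leaves the other wells'
bits alone.
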
> +
> +static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
> +				   struct i915_power_well *power_well)
> +{
> +	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
> +}
> +
> +static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
> +				  struct i915_power_well *power_well)
> +{
> +	vlv_set_power_well(dev_priv, power_well, true);
> +}
> +
> +static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
> +				   struct i915_power_well *power_well)
> +{
> +	vlv_set_power_well(dev_priv, power_well, false);
> +}
> +
> +static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
> +				   struct i915_power_well *power_well)
> +{
> +	int power_well_id = power_well->data;
> +	bool enabled = false;
> +	u32 mask;
> +	u32 state;
> +	u32 ctrl;
> +
> +	mask = PUNIT_PWRGT_MASK(power_well_id);
> +	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
> +
> +	mutex_lock(&dev_priv->rps.hw_lock);
> +
> +	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
> +	/*
> +	 * We only ever set the power-on and power-gate states, anything
> +	 * else is unexpected.
> +	 */
> +	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
> +		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
> +	if (state == ctrl)
> +		enabled = true;
> +
> +	/*
> +	 * A transient state at this point would mean some unexpected party
> +	 * is poking at the power controls too.
> +	 */
> +	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
> +	WARN_ON(ctrl != state);
> +
> +	mutex_unlock(&dev_priv->rps.hw_lock);
> +
> +	return enabled;
> +}
> +
> +static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
> +				          struct i915_power_well *power_well)
> +{
> +	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
> +
> +	vlv_set_power_well(dev_priv, power_well, true);
> +
> +	spin_lock_irq(&dev_priv->irq_lock);
> +	valleyview_enable_display_irqs(dev_priv);
> +	spin_unlock_irq(&dev_priv->irq_lock);
> +
> +	/*
> +	 * During driver initialization we need to defer enabling hotplug
> +	 * processing until fbdev is set up.
> +	 */
> +	if (dev_priv->enable_hotplug_processing)
> +		intel_hpd_init(dev_priv->dev);
> +
> +	i915_redisable_vga_power_on(dev_priv->dev);
> +}
> +
> +static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
> +				           struct i915_power_well *power_well)
> +{
> +	struct drm_device *dev = dev_priv->dev;
> +	enum pipe pipe;
> +
> +	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
> +
> +	spin_lock_irq(&dev_priv->irq_lock);
> +	for_each_pipe(pipe)
> +		intel_set_cpu_fifo_underrun_reporting_nolock(dev, pipe, false);
> +
> +	valleyview_disable_display_irqs(dev_priv);
> +	spin_unlock_irq(&dev_priv->irq_lock);
> +
> +	spin_lock_irq(&dev->vbl_lock);
> +	for_each_pipe(pipe)
> +		reset_vblank_counter(dev, pipe);
> +	spin_unlock_irq(&dev->vbl_lock);
> +
> +	vlv_set_power_well(dev_priv, power_well, false);
> +}
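
The ordering here matters: underrun reporting is masked and the display
irqs disabled up front, presumably because gating the well can trigger
spurious underrun reports, and the pipes' hw frame counters don't
survive the power-down, so the cached drm vblank state has to be
forgotten. I'd expect reset_vblank_counter(), introduced earlier in the
series, to be something like:

	static void reset_vblank_counter(struct drm_device *dev,
					 enum pipe pipe)
	{
		assert_spin_locked(&dev->vbl_lock);

		/* hw counter restarts from zero after the power-down */
		dev->vblank[pipe].last = 0;
	}
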
> +
>  static void check_power_well_state(struct drm_i915_private *dev_priv,
>  				   struct i915_power_well *power_well)
>  {
> @@ -5488,6 +5623,35 @@ EXPORT_SYMBOL_GPL(i915_release_power_well);
>  	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
>  	BIT(POWER_DOMAIN_INIT))
>  
> +#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
> +#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
> +
> +#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
> +	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
> +	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
> +	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
> +	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
> +	BIT(POWER_DOMAIN_PORT_CRT) |		\
> +	BIT(POWER_DOMAIN_INIT))
> +
> +#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
> +	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
> +	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
> +	BIT(POWER_DOMAIN_INIT))
> +
> +#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
> +	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
> +	BIT(POWER_DOMAIN_INIT))
> +
> +#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
> +	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
> +	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
> +	BIT(POWER_DOMAIN_INIT))
> +
> +#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
> +	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
> +	BIT(POWER_DOMAIN_INIT))
> +
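To make the mapping concrete: a 4-lane config on port B asserts
POWER_DOMAIN_PORT_DDI_B_4_LANES, which is contained in the common well
mask and in both B TX well masks:

	unsigned long domains = BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES);

	/* all three hold: common well plus both B lane pairs power up */
	WARN_ON(!(VLV_DPIO_CMN_BC_POWER_DOMAINS & domains));
	WARN_ON(!(VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS & domains));
	WARN_ON(!(VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS & domains));

(and with the well definitions further down, the C TX wells come up
too, per the all-lanes policy from the commit message).
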
>  static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
>  	.sync_hw = i9xx_always_on_power_well_noop,
>  	.enable = i9xx_always_on_power_well_noop,
> @@ -5539,6 +5703,77 @@ static struct i915_power_well bdw_power_wells[] = {
>  	},
>  };
>  
> +static const struct i915_power_well_ops vlv_display_power_well_ops = {
> +	.sync_hw = vlv_power_well_sync_hw,
> +	.enable = vlv_display_power_well_enable,
> +	.disable = vlv_display_power_well_disable,
> +	.is_enabled = vlv_power_well_enabled,
> +};
> +
> +static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
> +	.sync_hw = vlv_power_well_sync_hw,
> +	.enable = vlv_power_well_enable,
> +	.disable = vlv_power_well_disable,
> +	.is_enabled = vlv_power_well_enabled,
> +};
> +
> +static struct i915_power_well vlv_power_wells[] = {
> +	{
> +		.name = "always-on",
> +		.always_on = 1,
> +		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
> +		.ops = &i9xx_always_on_power_well_ops,
> +	},
> +	{
> +		.name = "display",
> +		.domains = VLV_DISPLAY_POWER_DOMAINS,
> +		.data = PUNIT_POWER_WELL_DISP2D,
> +		.ops = &vlv_display_power_well_ops,
> +	},
> +	{
> +		.name = "dpio-common",
> +		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
> +		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
> +		.ops = &vlv_dpio_power_well_ops,
> +	},
> +	{
> +		.name = "dpio-tx-b-01",
> +		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
> +		.ops = &vlv_dpio_power_well_ops,
> +		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
> +	},
> +	{
> +		.name = "dpio-tx-b-23",
> +		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
> +		.ops = &vlv_dpio_power_well_ops,
> +		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
> +	},
> +	{
> +		.name = "dpio-tx-c-01",
> +		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
> +		.ops = &vlv_dpio_power_well_ops,
> +		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
> +	},
> +	{
> +		.name = "dpio-tx-c-23",
> +		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
> +			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
> +		.ops = &vlv_dpio_power_well_ops,
> +		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
> +	},
> +};
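
Note every TX well's .domains here is the union of all four TX domain
masks; that's the "power all lanes" behavior from the commit message
encoded in the table. Once the PHY setup enables only the required
lanes, each entry can presumably shrink to just its own mask, e.g.:

	{
		.name = "dpio-tx-b-01",
		/* only the port B domains, once per-lane powering works */
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
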
> +
>  #define set_power_wells(power_domains, __power_wells) ({		\
>  	(power_domains)->power_wells = (__power_wells);			\
>  	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
> @@ -5560,6 +5795,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
>  	} else if (IS_BROADWELL(dev_priv->dev)) {
>  		set_power_wells(power_domains, bdw_power_wells);
>  		hsw_pwr = power_domains;
> +	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
> +		set_power_wells(power_domains, vlv_power_wells);
>  	} else {
>  		set_power_wells(power_domains, i9xx_always_on_power_well);
>  	}

Reviewed-by: Jesse Barnes <jbarnes at virtuousgeek.org>

-- 
Jesse Barnes, Intel Open Source Technology Center


