[Intel-gfx] [PATCH 21/36] drm/i915: Split GT powermanagement functions to intel_gt_pm.c
Sagar Arun Kamble
sagar.a.kamble at intel.com
Fri Mar 16 06:23:08 UTC 2018
On 3/14/2018 3:07 PM, Chris Wilson wrote:
> intel_pm.c has grown to several thousand lines of loosely connected code
> handling various powermanagement tasks. Split out the GT portion (IPS,
> RPS and RC6) into its own file for easier maintenance.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
<snip>
> diff --git a/drivers/gpu/drm/i915/intel_gt_pm.c b/drivers/gpu/drm/i915/intel_gt_pm.c
> new file mode 100644
> index 000000000000..763bf9378ae8
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_gt_pm.c
> @@ -0,0 +1,2422 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2012-2018 Intel Corporation
> + */
> +
> +#include <linux/cpufreq.h>
> +#include <linux/module.h>
> +
> +#include "../../../platform/x86/intel_ips.h"
> +
> +#include "i915_drv.h"
> +#include "intel_drv.h"
> +#include "intel_gt_pm.h"
I think intel_gt_pm.h should be the first include, as we have been doing in
the GuC-side refactoring (a possible ordering is sketched below).
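Something like this, just a sketch of the ordering I have in mind (only
intel_gt_pm.h moves to the top; the other includes stay as in the patch):

    #include "intel_gt_pm.h"	/* the new header first, as on the GuC side */

    #include <linux/cpufreq.h>
    #include <linux/module.h>

    #include "../../../platform/x86/intel_ips.h"

    #include "i915_drv.h"
    #include "intel_drv.h"
    #include "intel_sideband.h"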
> +#include "intel_sideband.h"
> +
> +/**
> + * DOC: RC6
> + *
> + * RC6 is a special power stage which allows the GPU to enter an very
> + * low-voltage mode when idle, using down to 0V while at this stage. This
> + * stage is entered automatically when the GPU is idle when RC6 support is
> + * enabled, and as soon as new workload arises GPU wakes up automatically as
> + * well.
> + *
> + * There are different RC6 modes available in Intel GPU, which differentiate
> + * among each other with the latency required to enter and leave RC6 and
> + * voltage consumed by the GPU in different states.
> + *
> + * The combination of the following flags define which states GPU is allowed
> + * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
> + * RC6pp is deepest RC6. Their support by hardware varies according to the
> + * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
> + * which brings the most power savings; deeper states save more power, but
> + * require higher latency to switch to and wake up.
> + */
> +
...
> diff --git a/drivers/gpu/drm/i915/intel_gt_pm.h b/drivers/gpu/drm/i915/intel_gt_pm.h
> new file mode 100644
> index 000000000000..ab4f73a39ce6
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_gt_pm.h
> @@ -0,0 +1,49 @@
> +/*
> + * Copyright © 2012 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + */
> +
Need SPDX License identifier here.
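For example, the first line of the header could simply be something like this
(matching the MIT identifier already used at the top of the new intel_gt_pm.c):

    /* SPDX-License-Identifier: MIT */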
Thanks for the many checkpatch/comment fixes; a few more are still flagged.
Otherwise the change looks good to me.
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble at intel.com>
> +#ifndef __INTEL_GT_PM_H__
> +#define __INTEL_GT_PM_H__
> +
> +struct drm_i915_private;
> +struct i915_request;
> +struct intel_rps_client;
> +
> +void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
> +void intel_gpu_ips_teardown(void);
> +
> +void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
> +
> +void gen6_rps_busy(struct drm_i915_private *dev_priv);
> +void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
> +void gen6_rps_idle(struct drm_i915_private *dev_priv);
> +void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
> +
> +int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
> +int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
> +
> +#endif /* __INTEL_GT_PM_H__ */
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index a2ebf66ff9ed..0bbee12bee41 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -34,27 +34,6 @@
> #include "i915_drv.h"
> #include "intel_drv.h"
> #include "intel_sideband.h"
> -#include "../../../platform/x86/intel_ips.h"
> -
> -/**
> - * DOC: RC6
> - *
> - * RC6 is a special power stage which allows the GPU to enter an very
> - * low-voltage mode when idle, using down to 0V while at this stage. This
> - * stage is entered automatically when the GPU is idle when RC6 support is
> - * enabled, and as soon as new workload arises GPU wakes up automatically as well.
> - *
> - * There are different RC6 modes available in Intel GPU, which differentiate
> - * among each other with the latency required to enter and leave RC6 and
> - * voltage consumed by the GPU in different states.
> - *
> - * The combination of the following flags define which states GPU is allowed
> - * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
> - * RC6pp is deepest RC6. Their support by hardware varies according to the
> - * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
> - * which brings the most power savings; deeper states save more power, but
> - * require higher latency to switch to and wake up.
> - */
>
> static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
> {
> @@ -5925,2548 +5904,269 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
> intel_enable_ipc(dev_priv);
> }
>
> -/*
> - * Lock protecting IPS related data structures
> - */
> -DEFINE_SPINLOCK(mchdev_lock);
> +static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
> +{
> + /*
> + * On Ibex Peak and Cougar Point, we need to disable clock
> + * gating for the panel power sequencer or it will fail to
> + * start up when no ports are active.
> + */
> + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
> +}
>
> -bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
> +static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
> {
> - u16 rgvswctl;
> + enum pipe pipe;
>
> - lockdep_assert_held(&mchdev_lock);
> + for_each_pipe(dev_priv, pipe) {
> + I915_WRITE(DSPCNTR(pipe),
> + I915_READ(DSPCNTR(pipe)) |
> + DISPPLANE_TRICKLE_FEED_DISABLE);
>
> - rgvswctl = I915_READ16(MEMSWCTL);
> - if (rgvswctl & MEMCTL_CMD_STS) {
> - DRM_DEBUG("gpu busy, RCS change rejected\n");
> - return false; /* still busy with another command */
> + I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
> + POSTING_READ(DSPSURF(pipe));
> }
> -
> - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
> - (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
> - I915_WRITE16(MEMSWCTL, rgvswctl);
> - POSTING_READ16(MEMSWCTL);
> -
> - rgvswctl |= MEMCTL_CMD_STS;
> - I915_WRITE16(MEMSWCTL, rgvswctl);
> -
> - return true;
> }
>
> -static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
> +static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
> {
> - u32 rgvmodectl;
> - u8 fmax, fmin, fstart, vstart;
> -
> - spin_lock_irq(&mchdev_lock);
> -
> - rgvmodectl = I915_READ(MEMMODECTL);
> -
> - /* Enable temp reporting */
> - I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
> - I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
> -
> - /* 100ms RC evaluation intervals */
> - I915_WRITE(RCUPEI, 100000);
> - I915_WRITE(RCDNEI, 100000);
> -
> - /* Set max/min thresholds to 90ms and 80ms respectively */
> - I915_WRITE(RCBMAXAVG, 90000);
> - I915_WRITE(RCBMINAVG, 80000);
> -
> - I915_WRITE(MEMIHYST, 1);
> -
> - /* Set up min, max, and cur for interrupt handling */
> - fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
> - fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
> - fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
> - MEMMODE_FSTART_SHIFT;
> -
> - vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
> - PXVFREQ_PX_SHIFT;
> -
> - dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
> - dev_priv->ips.fstart = fstart;
> + uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
>
> - dev_priv->ips.max_delay = fstart;
> - dev_priv->ips.min_delay = fmin;
> - dev_priv->ips.cur_delay = fstart;
> + /*
> + * Required for FBC
> + * WaFbcDisableDpfcClockGating:ilk
> + */
> + dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
> + ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
> + ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
>
> - DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
> - fmax, fmin, fstart);
> + I915_WRITE(PCH_3DCGDIS0,
> + MARIUNIT_CLOCK_GATE_DISABLE |
> + SVSMUNIT_CLOCK_GATE_DISABLE);
> + I915_WRITE(PCH_3DCGDIS1,
> + VFMUNIT_CLOCK_GATE_DISABLE);
>
> - I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
> + /*
> + * According to the spec the following bits should be set in
> + * order to enable memory self-refresh
> + * The bit 22/21 of 0x42004
> + * The bit 5 of 0x42020
> + * The bit 15 of 0x45000
> + */
> + I915_WRITE(ILK_DISPLAY_CHICKEN2,
> + (I915_READ(ILK_DISPLAY_CHICKEN2) |
> + ILK_DPARB_GATE | ILK_VSDPFD_FULL));
> + dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
> + I915_WRITE(DISP_ARB_CTL,
> + (I915_READ(DISP_ARB_CTL) |
> + DISP_FBC_WM_DIS));
>
> /*
> - * Interrupts will be enabled in ironlake_irq_postinstall
> + * Based on the document from hardware guys the following bits
> + * should be set unconditionally in order to enable FBC.
> + * The bit 22 of 0x42000
> + * The bit 22 of 0x42004
> + * The bit 7,8,9 of 0x42020.
> */
> + if (IS_IRONLAKE_M(dev_priv)) {
> + /* WaFbcAsynchFlipDisableFbcQueue:ilk */
> + I915_WRITE(ILK_DISPLAY_CHICKEN1,
> + I915_READ(ILK_DISPLAY_CHICKEN1) |
> + ILK_FBCQ_DIS);
> + I915_WRITE(ILK_DISPLAY_CHICKEN2,
> + I915_READ(ILK_DISPLAY_CHICKEN2) |
> + ILK_DPARB_GATE);
> + }
>
> - I915_WRITE(VIDSTART, vstart);
> - POSTING_READ(VIDSTART);
> + I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
>
> - rgvmodectl |= MEMMODE_SWMODE_EN;
> - I915_WRITE(MEMMODECTL, rgvmodectl);
> + I915_WRITE(ILK_DISPLAY_CHICKEN2,
> + I915_READ(ILK_DISPLAY_CHICKEN2) |
> + ILK_ELPIN_409_SELECT);
> + I915_WRITE(_3D_CHICKEN2,
> + _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
> + _3D_CHICKEN2_WM_READ_PIPELINED);
>
> - if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
> - DRM_ERROR("stuck trying to change perf mode\n");
> - mdelay(1);
> + /* WaDisableRenderCachePipelinedFlush:ilk */
> + I915_WRITE(CACHE_MODE_0,
> + _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
>
> - ironlake_set_drps(dev_priv, fstart);
> + /* WaDisable_RenderCache_OperationalFlush:ilk */
> + I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
>
> - dev_priv->ips.last_count1 = I915_READ(DMIEC) +
> - I915_READ(DDREC) + I915_READ(CSIEC);
> - dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
> - dev_priv->ips.last_count2 = I915_READ(GFXEC);
> - dev_priv->ips.last_time2 = ktime_get_raw_ns();
> + g4x_disable_trickle_feed(dev_priv);
>
> - spin_unlock_irq(&mchdev_lock);
> + ibx_init_clock_gating(dev_priv);
> }
>
> -static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
> +static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
> {
> - u16 rgvswctl;
> -
> - spin_lock_irq(&mchdev_lock);
> -
> - rgvswctl = I915_READ16(MEMSWCTL);
> -
> - /* Ack interrupts, disable EFC interrupt */
> - I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
> - I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
> - I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
> - I915_WRITE(DEIIR, DE_PCU_EVENT);
> - I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
> -
> - /* Go back to the starting frequency */
> - ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
> - mdelay(1);
> - rgvswctl |= MEMCTL_CMD_STS;
> - I915_WRITE(MEMSWCTL, rgvswctl);
> - mdelay(1);
> + int pipe;
> + uint32_t val;
>
> - spin_unlock_irq(&mchdev_lock);
> + /*
> + * On Ibex Peak and Cougar Point, we need to disable clock
> + * gating for the panel power sequencer or it will fail to
> + * start up when no ports are active.
> + */
> + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
> + PCH_DPLUNIT_CLOCK_GATE_DISABLE |
> + PCH_CPUNIT_CLOCK_GATE_DISABLE);
> + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
> + DPLS_EDP_PPS_FIX_DIS);
> + /* The below fixes the weird display corruption, a few pixels shifted
> + * downward, on (only) LVDS of some HP laptops with IVY.
> + */
> + for_each_pipe(dev_priv, pipe) {
> + val = I915_READ(TRANS_CHICKEN2(pipe));
> + val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
> + val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
> + if (dev_priv->vbt.fdi_rx_polarity_inverted)
> + val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
> + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
> + val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
> + val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
> + I915_WRITE(TRANS_CHICKEN2(pipe), val);
> + }
> + /* WADP0ClockGatingDisable */
> + for_each_pipe(dev_priv, pipe) {
> + I915_WRITE(TRANS_CHICKEN1(pipe),
> + TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
> + }
> }
>
> -/* There's a funny hw issue where the hw returns all 0 when reading from
> - * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
> - * ourselves, instead of doing a rmw cycle (which might result in us clearing
> - * all limits and the gpu stuck at whatever frequency it is at atm).
> - */
> -static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
> +static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
> {
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - u32 limits;
> -
> - /* Only set the down limit when we've reached the lowest level to avoid
> - * getting more interrupts, otherwise leave this clear. This prevents a
> - * race in the hw when coming out of rc6: There's a tiny window where
> - * the hw runs at the minimal clock before selecting the desired
> - * frequency, if the down threshold expires in that window we will not
> - * receive a down interrupt. */
> - if (INTEL_GEN(dev_priv) >= 9) {
> - limits = (rps->max_freq_softlimit) << 23;
> - if (val <= rps->min_freq_softlimit)
> - limits |= (rps->min_freq_softlimit) << 14;
> - } else {
> - limits = rps->max_freq_softlimit << 24;
> - if (val <= rps->min_freq_softlimit)
> - limits |= rps->min_freq_softlimit << 16;
> - }
> + uint32_t tmp;
>
> - return limits;
> + tmp = I915_READ(MCH_SSKPD);
> + if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
> + DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
> + tmp);
> }
>
> -static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
> +static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
> {
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - int new_power;
> - u32 threshold_up = 0, threshold_down = 0; /* in % */
> - u32 ei_up = 0, ei_down = 0;
> + uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
>
> - new_power = rps->power;
> - switch (rps->power) {
> - case LOW_POWER:
> - if (val > rps->efficient_freq + 1 &&
> - val > rps->cur_freq)
> - new_power = BETWEEN;
> - break;
> + I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
>
> - case BETWEEN:
> - if (val <= rps->efficient_freq &&
> - val < rps->cur_freq)
> - new_power = LOW_POWER;
> - else if (val >= rps->rp0_freq &&
> - val > rps->cur_freq)
> - new_power = HIGH_POWER;
> - break;
> + I915_WRITE(ILK_DISPLAY_CHICKEN2,
> + I915_READ(ILK_DISPLAY_CHICKEN2) |
> + ILK_ELPIN_409_SELECT);
>
> - case HIGH_POWER:
> - if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
> - val < rps->cur_freq)
> - new_power = BETWEEN;
> - break;
> - }
> - /* Max/min bins are special */
> - if (val <= rps->min_freq_softlimit)
> - new_power = LOW_POWER;
> - if (val >= rps->max_freq_softlimit)
> - new_power = HIGH_POWER;
> - if (new_power == rps->power)
> - return;
> + /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
> + I915_WRITE(_3D_CHICKEN,
> + _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
>
> - /* Note the units here are not exactly 1us, but 1280ns. */
> - switch (new_power) {
> - case LOW_POWER:
> - /* Upclock if more than 95% busy over 16ms */
> - ei_up = 16000;
> - threshold_up = 95;
> + /* WaDisable_RenderCache_OperationalFlush:snb */
> + I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
>
> - /* Downclock if less than 85% busy over 32ms */
> - ei_down = 32000;
> - threshold_down = 85;
> - break;
> + /*
> + * BSpec recoomends 8x4 when MSAA is used,
> + * however in practice 16x4 seems fastest.
> + *
> + * Note that PS/WM thread counts depend on the WIZ hashing
> + * disable bit, which we don't touch here, but it's good
> + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
> + */
> + I915_WRITE(GEN6_GT_MODE,
> + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
>
> - case BETWEEN:
> - /* Upclock if more than 90% busy over 13ms */
> - ei_up = 13000;
> - threshold_up = 90;
> + I915_WRITE(CACHE_MODE_0,
> + _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
>
> - /* Downclock if less than 75% busy over 32ms */
> - ei_down = 32000;
> - threshold_down = 75;
> - break;
> + I915_WRITE(GEN6_UCGCTL1,
> + I915_READ(GEN6_UCGCTL1) |
> + GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
> + GEN6_CSUNIT_CLOCK_GATE_DISABLE);
>
> - case HIGH_POWER:
> - /* Upclock if more than 85% busy over 10ms */
> - ei_up = 10000;
> - threshold_up = 85;
> + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
> + * gating disable must be set. Failure to set it results in
> + * flickering pixels due to Z write ordering failures after
> + * some amount of runtime in the Mesa "fire" demo, and Unigine
> + * Sanctuary and Tropics, and apparently anything else with
> + * alpha test or pixel discard.
> + *
> + * According to the spec, bit 11 (RCCUNIT) must also be set,
> + * but we didn't debug actual testcases to find it out.
> + *
> + * WaDisableRCCUnitClockGating:snb
> + * WaDisableRCPBUnitClockGating:snb
> + */
> + I915_WRITE(GEN6_UCGCTL2,
> + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
> + GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
>
> - /* Downclock if less than 60% busy over 32ms */
> - ei_down = 32000;
> - threshold_down = 60;
> - break;
> - }
> + /* WaStripsFansDisableFastClipPerformanceFix:snb */
> + I915_WRITE(_3D_CHICKEN3,
> + _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
>
> - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
> - /*
> - * Baytrail and Braswell control the gpu frequency via the
> - * punit, which is very slow and expensive to communicate with,
> - * as we synchronously force the package to C0. If we try and
> - * update the gpufreq too often we cause measurable system
> - * load for little benefit (effectively stealing CPU time for
> - * the GPU, negatively impacting overall throughput).
> - */
> - ei_up <<= 2;
> - ei_down <<= 2;
> - }
> + /*
> + * Bspec says:
> + * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
> + * 3DSTATE_SF number of SF output attributes is more than 16."
> + */
> + I915_WRITE(_3D_CHICKEN3,
> + _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
>
> - I915_WRITE(GEN6_RP_UP_EI,
> - GT_INTERVAL_FROM_US(dev_priv, ei_up));
> - I915_WRITE(GEN6_RP_UP_THRESHOLD,
> - GT_INTERVAL_FROM_US(dev_priv,
> - ei_up * threshold_up / 100));
> + /*
> + * According to the spec the following bits should be
> + * set in order to enable memory self-refresh and fbc:
> + * The bit21 and bit22 of 0x42000
> + * The bit21 and bit22 of 0x42004
> + * The bit5 and bit7 of 0x42020
> + * The bit14 of 0x70180
> + * The bit14 of 0x71180
> + *
> + * WaFbcAsynchFlipDisableFbcQueue:snb
> + */
> + I915_WRITE(ILK_DISPLAY_CHICKEN1,
> + I915_READ(ILK_DISPLAY_CHICKEN1) |
> + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
> + I915_WRITE(ILK_DISPLAY_CHICKEN2,
> + I915_READ(ILK_DISPLAY_CHICKEN2) |
> + ILK_DPARB_GATE | ILK_VSDPFD_FULL);
> + I915_WRITE(ILK_DSPCLK_GATE_D,
> + I915_READ(ILK_DSPCLK_GATE_D) |
> + ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
> + ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
>
> - I915_WRITE(GEN6_RP_DOWN_EI,
> - GT_INTERVAL_FROM_US(dev_priv, ei_down));
> - I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
> - GT_INTERVAL_FROM_US(dev_priv,
> - ei_down * threshold_down / 100));
> + g4x_disable_trickle_feed(dev_priv);
>
> - I915_WRITE(GEN6_RP_CONTROL,
> - GEN6_RP_MEDIA_TURBO |
> - GEN6_RP_MEDIA_HW_NORMAL_MODE |
> - GEN6_RP_MEDIA_IS_GFX |
> - GEN6_RP_ENABLE |
> - GEN6_RP_UP_BUSY_AVG |
> - GEN6_RP_DOWN_IDLE_AVG);
> + cpt_init_clock_gating(dev_priv);
>
> - rps->power = new_power;
> - rps->up_threshold = threshold_up;
> - rps->down_threshold = threshold_down;
> - rps->last_adj = 0;
> + gen6_check_mch_setup(dev_priv);
> }
>
> -static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
> +static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
> {
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - u32 mask = 0;
> -
> - /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
> - if (val > rps->min_freq_softlimit)
> - mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
> - if (val < rps->max_freq_softlimit)
> - mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
> + uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
>
> - mask &= dev_priv->pm_rps_events;
> + /*
> + * WaVSThreadDispatchOverride:ivb,vlv
> + *
> + * This actually overrides the dispatch
> + * mode for all thread types.
> + */
> + reg &= ~GEN7_FF_SCHED_MASK;
> + reg |= GEN7_FF_TS_SCHED_HW;
> + reg |= GEN7_FF_VS_SCHED_HW;
> + reg |= GEN7_FF_DS_SCHED_HW;
>
> - return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
> + I915_WRITE(GEN7_FF_THREAD_MODE, reg);
> }
>
> -/* gen6_set_rps is called to update the frequency request, but should also be
> - * called when the range (min_delay and max_delay) is modified so that we can
> - * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
> -static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
> +static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
> {
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - /* min/max delay may still have been modified so be sure to
> - * write the limits value.
> + /*
> + * TODO: this bit should only be enabled when really needed, then
> + * disabled when not needed anymore in order to save power.
> */
> - if (val != rps->cur_freq) {
> - gen6_set_rps_thresholds(dev_priv, val);
> -
> - if (INTEL_GEN(dev_priv) >= 9)
> - I915_WRITE(GEN6_RPNSWREQ,
> - GEN9_FREQUENCY(val));
> - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
> - I915_WRITE(GEN6_RPNSWREQ,
> - HSW_FREQUENCY(val));
> - else
> - I915_WRITE(GEN6_RPNSWREQ,
> - GEN6_FREQUENCY(val) |
> - GEN6_OFFSET(0) |
> - GEN6_AGGRESSIVE_TURBO);
> - }
> -
> - /* Make sure we continue to get interrupts
> - * until we hit the minimum or maximum frequencies.
> - */
> - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
> - I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
> -
> - rps->cur_freq = val;
> - trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
> -
> - return 0;
> -}
> -
> -static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
> -{
> - int err;
> -
> - if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
> - "Odd GPU freq value\n"))
> - val &= ~1;
> -
> - I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
> -
> - if (val != dev_priv->gt_pm.rps.cur_freq) {
> - vlv_punit_get(dev_priv);
> - err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
> - vlv_punit_put(dev_priv);
> - if (err)
> - return err;
> -
> - gen6_set_rps_thresholds(dev_priv, val);
> - }
> -
> - dev_priv->gt_pm.rps.cur_freq = val;
> - trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
> -
> - return 0;
> -}
> -
> -/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
> - *
> - * * If Gfx is Idle, then
> - * 1. Forcewake Media well.
> - * 2. Request idle freq.
> - * 3. Release Forcewake of Media well.
> -*/
> -static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - u32 val = rps->idle_freq;
> - int err;
> -
> - if (rps->cur_freq <= val)
> - return;
> -
> - /* The punit delays the write of the frequency and voltage until it
> - * determines the GPU is awake. During normal usage we don't want to
> - * waste power changing the frequency if the GPU is sleeping (rc6).
> - * However, the GPU and driver is now idle and we do not want to delay
> - * switching to minimum voltage (reducing power whilst idle) as we do
> - * not expect to be woken in the near future and so must flush the
> - * change by waking the device.
> - *
> - * We choose to take the media powerwell (either would do to trick the
> - * punit into committing the voltage change) as that takes a lot less
> - * power than the render powerwell.
> - */
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
> - err = valleyview_set_rps(dev_priv, val);
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
> -
> - if (err)
> - DRM_ERROR("Failed to set RPS for idle\n");
> -}
> -
> -void gen6_rps_busy(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - mutex_lock(&rps->lock);
> - if (rps->enabled) {
> - u8 freq;
> -
> - if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
> - gen6_rps_reset_ei(dev_priv);
> - I915_WRITE(GEN6_PMINTRMSK,
> - gen6_rps_pm_mask(dev_priv, rps->cur_freq));
> -
> - gen6_enable_rps_interrupts(dev_priv);
> -
> - /* Use the user's desired frequency as a guide, but for better
> - * performance, jump directly to RPe as our starting frequency.
> - */
> - freq = max(rps->cur_freq,
> - rps->efficient_freq);
> -
> - if (intel_set_rps(dev_priv,
> - clamp(freq,
> - rps->min_freq_softlimit,
> - rps->max_freq_softlimit)))
> - DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
> - }
> - mutex_unlock(&rps->lock);
> -}
> -
> -void gen6_rps_idle(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - /* Flush our bottom-half so that it does not race with us
> - * setting the idle frequency and so that it is bounded by
> - * our rpm wakeref. And then disable the interrupts to stop any
> - * futher RPS reclocking whilst we are asleep.
> - */
> - gen6_disable_rps_interrupts(dev_priv);
> -
> - mutex_lock(&rps->lock);
> - if (rps->enabled) {
> - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> - vlv_set_rps_idle(dev_priv);
> - else
> - gen6_set_rps(dev_priv, rps->idle_freq);
> - rps->last_adj = 0;
> - I915_WRITE(GEN6_PMINTRMSK,
> - gen6_sanitize_rps_pm_mask(dev_priv, ~0));
> - }
> - mutex_unlock(&rps->lock);
> -}
> -
> -void gen6_rps_boost(struct i915_request *rq,
> - struct intel_rps_client *rps_client)
> -{
> - struct intel_rps *rps = &rq->i915->gt_pm.rps;
> - unsigned long flags;
> - bool boost;
> -
> - /* This is intentionally racy! We peek at the state here, then
> - * validate inside the RPS worker.
> - */
> - if (!rps->enabled)
> - return;
> -
> - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
> - return;
> -
> - /* Serializes with i915_request_retire() */
> - boost = false;
> - spin_lock_irqsave(&rq->lock, flags);
> - if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
> - boost = !atomic_fetch_inc(&rps->num_waiters);
> - rq->waitboost = true;
> - }
> - spin_unlock_irqrestore(&rq->lock, flags);
> - if (!boost)
> - return;
> -
> - if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
> - schedule_work(&rps->work);
> -
> - atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
> -}
> -
> -int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - int err;
> -
> - lockdep_assert_held(&rps->lock);
> - GEM_BUG_ON(val > rps->max_freq);
> - GEM_BUG_ON(val < rps->min_freq);
> -
> - if (!rps->enabled) {
> - rps->cur_freq = val;
> - return 0;
> - }
> -
> - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> - err = valleyview_set_rps(dev_priv, val);
> - else
> - err = gen6_set_rps(dev_priv, val);
> -
> - return err;
> -}
> -
> -static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
> -{
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> - I915_WRITE(GEN9_PG_ENABLE, 0);
> -}
> -
> -static void gen9_disable_rps(struct drm_i915_private *dev_priv)
> -{
> - I915_WRITE(GEN6_RP_CONTROL, 0);
> -}
> -
> -static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
> -{
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -}
> -
> -static void gen6_disable_rps(struct drm_i915_private *dev_priv)
> -{
> - I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
> - I915_WRITE(GEN6_RP_CONTROL, 0);
> -}
> -
> -static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
> -{
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -}
> -
> -static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
> -{
> - I915_WRITE(GEN6_RP_CONTROL, 0);
> -}
> -
> -static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
> -{
> - /* We're doing forcewake before Disabling RC6,
> - * This what the BIOS expects when going into suspend */
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
> -{
> - I915_WRITE(GEN6_RP_CONTROL, 0);
> -}
> -
> -static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
> -{
> - bool enable_rc6 = true;
> - unsigned long rc6_ctx_base;
> - u32 rc_ctl;
> - int rc_sw_target;
> -
> - rc_ctl = I915_READ(GEN6_RC_CONTROL);
> - rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
> - RC_SW_TARGET_STATE_SHIFT;
> - DRM_DEBUG_DRIVER("BIOS enabled RC states: "
> - "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
> - onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
> - onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
> - rc_sw_target);
> -
> - if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
> - DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
> - enable_rc6 = false;
> - }
> -
> - /*
> - * The exact context size is not known for BXT, so assume a page size
> - * for this check.
> - */
> - rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
> - if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
> - (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
> - DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
> - enable_rc6 = false;
> - }
> -
> - if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
> - ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
> - ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
> - ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
> - DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
> - enable_rc6 = false;
> - }
> -
> - if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
> - !I915_READ(GEN8_PUSHBUS_ENABLE) ||
> - !I915_READ(GEN8_PUSHBUS_SHIFT)) {
> - DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
> - enable_rc6 = false;
> - }
> -
> - if (!I915_READ(GEN6_GFXPAUSE)) {
> - DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
> - enable_rc6 = false;
> - }
> -
> - if (!I915_READ(GEN8_MISC_CTRL0)) {
> - DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
> - enable_rc6 = false;
> - }
> -
> - return enable_rc6;
> -}
> -
> -static bool sanitize_rc6(struct drm_i915_private *i915)
> -{
> - struct intel_device_info *info = mkwrite_device_info(i915);
> -
> - /* Powersaving is controlled by the host when inside a VM */
> - if (intel_vgpu_active(i915))
> - info->has_rc6 = 0;
> -
> - if (info->has_rc6 &&
> - IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
> - DRM_INFO("RC6 disabled by BIOS\n");
> - info->has_rc6 = 0;
> - }
> -
> - /*
> - * We assume that we do not have any deep rc6 levels if we don't have
> - * have the previous rc6 level supported, i.e. we use HAS_RC6()
> - * as the initial coarse check for rc6 in general, moving on to
> - * progressively finer/deeper levels.
> - */
> - if (!info->has_rc6 && info->has_rc6p)
> - info->has_rc6p = 0;
> -
> - return info->has_rc6;
> -}
> -
> -static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - /* All of these values are in units of 50MHz */
> -
> - /* static values from HW: RP0 > RP1 > RPn (min_freq) */
> - if (IS_GEN9_LP(dev_priv)) {
> - u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
> - rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
> - rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
> - rps->min_freq = (rp_state_cap >> 0) & 0xff;
> - } else {
> - u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
> - rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
> - rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
> - rps->min_freq = (rp_state_cap >> 16) & 0xff;
> - }
> - /* hw_max = RP0 until we check for overclocking */
> - rps->max_freq = rps->rp0_freq;
> -
> - rps->efficient_freq = rps->rp1_freq;
> - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
> - IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
> - u32 ddcc_status = 0;
> -
> - if (sandybridge_pcode_read(dev_priv,
> - HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
> - &ddcc_status) == 0)
> - rps->efficient_freq =
> - clamp_t(u8,
> - ((ddcc_status >> 8) & 0xff),
> - rps->min_freq,
> - rps->max_freq);
> - }
> -
> - if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
> - /* Store the frequency values in 16.66 MHZ units, which is
> - * the natural hardware unit for SKL
> - */
> - rps->rp0_freq *= GEN9_FREQ_SCALER;
> - rps->rp1_freq *= GEN9_FREQ_SCALER;
> - rps->min_freq *= GEN9_FREQ_SCALER;
> - rps->max_freq *= GEN9_FREQ_SCALER;
> - rps->efficient_freq *= GEN9_FREQ_SCALER;
> - }
> -}
> -
> -static void reset_rps(struct drm_i915_private *dev_priv,
> - int (*set)(struct drm_i915_private *, u8))
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - u8 freq = rps->cur_freq;
> -
> - /* force a reset */
> - rps->power = -1;
> - rps->cur_freq = -1;
> -
> - if (set(dev_priv, freq))
> - DRM_ERROR("Failed to reset RPS to initial values\n");
> -}
> -
> -/* See the Gen9_GT_PM_Programming_Guide doc for the below */
> -static void gen9_enable_rps(struct drm_i915_private *dev_priv)
> -{
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* Program defaults and thresholds for RPS */
> - if (IS_GEN9(dev_priv))
> - I915_WRITE(GEN6_RC_VIDEO_FREQ,
> - GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
> -
> - /* 1 second timeout*/
> - I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
> - GT_INTERVAL_FROM_US(dev_priv, 1000000));
> -
> - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
> -
> - /* Leaning on the below call to gen6_set_rps to program/setup the
> - * Up/Down EI & threshold registers, as well as the RP_CONTROL,
> - * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
> - reset_rps(dev_priv, gen6_set_rps);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
> -{
> - struct intel_engine_cs *engine;
> - enum intel_engine_id id;
> - u32 rc6_mode;
> -
> - /* 1a: Software RC state - RC0 */
> - I915_WRITE(GEN6_RC_STATE, 0);
> -
> - /* 1b: Get forcewake during program sequence. Although the driver
> - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* 2a: Disable RC states. */
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -
> - /* 2b: Program RC6 thresholds.*/
> - if (INTEL_GEN(dev_priv) >= 10) {
> - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
> - I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
> - } else if (IS_SKYLAKE(dev_priv)) {
> - /*
> - * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
> - * when CPG is enabled
> - */
> - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
> - } else {
> - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
> - }
> -
> - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
> - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
> - for_each_engine(engine, dev_priv, id)
> - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
> -
> - if (HAS_GUC(dev_priv))
> - I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
> -
> - I915_WRITE(GEN6_RC_SLEEP, 0);
> -
> - /*
> - * 2c: Program Coarse Power Gating Policies.
> - *
> - * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
> - * use instead is a more conservative estimate for the maximum time
> - * it takes us to service a CS interrupt and submit a new ELSP - that
> - * is the time which the GPU is idle waiting for the CPU to select the
> - * next request to execute. If the idle hysteresis is less than that
> - * interrupt service latency, the hardware will automatically gate
> - * the power well and we will then incur the wake up cost on top of
> - * the service latency. A similar guide from intel_pstate is that we
> - * do not want the enable hysteresis to less than the wakeup latency.
> - *
> - * igt/gem_exec_nop/sequential provides a rough estimate for the
> - * service latency, and puts it around 10us for Broadwell (and other
> - * big core) and around 40us for Broxton (and other low power cores).
> - * [Note that for legacy ringbuffer submission, this is less than 1us!]
> - * However, the wakeup latency on Broxton is closer to 100us. To be
> - * conservative, we have to factor in a context switch on top (due
> - * to ksoftirqd).
> - */
> - I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
> - I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
> -
> - /* 3a: Enable RC6 */
> - I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
> -
> - /* WaRsUseTimeoutMode:cnl (pre-prod) */
> - if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
> - rc6_mode = GEN7_RC_CTL_TO_MODE;
> - else
> - rc6_mode = GEN6_RC_CTL_EI_MODE(1);
> -
> - I915_WRITE(GEN6_RC_CONTROL,
> - GEN6_RC_CTL_HW_ENABLE |
> - GEN6_RC_CTL_RC6_ENABLE |
> - rc6_mode);
> -
> - /*
> - * 3b: Enable Coarse Power Gating only when RC6 is enabled.
> - * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
> - */
> - if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
> - I915_WRITE(GEN9_PG_ENABLE, 0);
> - else
> - I915_WRITE(GEN9_PG_ENABLE,
> - GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
> -{
> - struct intel_engine_cs *engine;
> - enum intel_engine_id id;
> -
> - /* 1a: Software RC state - RC0 */
> - I915_WRITE(GEN6_RC_STATE, 0);
> -
> - /* 1b: Get forcewake during program sequence. Although the driver
> - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* 2a: Disable RC states. */
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -
> - /* 2b: Program RC6 thresholds.*/
> - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
> - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
> - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
> - for_each_engine(engine, dev_priv, id)
> - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
> - I915_WRITE(GEN6_RC_SLEEP, 0);
> - I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
> -
> - /* 3: Enable RC6 */
> -
> - I915_WRITE(GEN6_RC_CONTROL,
> - GEN6_RC_CTL_HW_ENABLE |
> - GEN7_RC_CTL_TO_MODE |
> - GEN6_RC_CTL_RC6_ENABLE);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void gen8_enable_rps(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* 1 Program defaults and thresholds for RPS*/
> - I915_WRITE(GEN6_RPNSWREQ,
> - HSW_FREQUENCY(rps->rp1_freq));
> - I915_WRITE(GEN6_RC_VIDEO_FREQ,
> - HSW_FREQUENCY(rps->rp1_freq));
> - /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
> - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
> -
> - /* Docs recommend 900MHz, and 300 MHz respectively */
> - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
> - rps->max_freq_softlimit << 24 |
> - rps->min_freq_softlimit << 16);
> -
> - I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
> - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
> - I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
> - I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
> -
> - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
> -
> - /* 2: Enable RPS */
> - I915_WRITE(GEN6_RP_CONTROL,
> - GEN6_RP_MEDIA_TURBO |
> - GEN6_RP_MEDIA_HW_NORMAL_MODE |
> - GEN6_RP_MEDIA_IS_GFX |
> - GEN6_RP_ENABLE |
> - GEN6_RP_UP_BUSY_AVG |
> - GEN6_RP_DOWN_IDLE_AVG);
> -
> - reset_rps(dev_priv, gen6_set_rps);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
> -{
> - struct intel_engine_cs *engine;
> - enum intel_engine_id id;
> - u32 rc6vids, rc6_mask;
> - u32 gtfifodbg;
> - int ret;
> -
> - I915_WRITE(GEN6_RC_STATE, 0);
> -
> - /* Clear the DBG now so we don't confuse earlier errors */
> - gtfifodbg = I915_READ(GTFIFODBG);
> - if (gtfifodbg) {
> - DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
> - I915_WRITE(GTFIFODBG, gtfifodbg);
> - }
> -
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* disable the counters and set deterministic thresholds */
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -
> - I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
> - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
> - I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
> - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
> - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
> -
> - for_each_engine(engine, dev_priv, id)
> - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
> -
> - I915_WRITE(GEN6_RC_SLEEP, 0);
> - I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
> - if (IS_IVYBRIDGE(dev_priv))
> - I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
> - else
> - I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
> - I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
> - I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
> -
> - /* We don't use those on Haswell */
> - rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
> - if (HAS_RC6p(dev_priv))
> - rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
> - if (HAS_RC6pp(dev_priv))
> - rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
> - I915_WRITE(GEN6_RC_CONTROL,
> - rc6_mask |
> - GEN6_RC_CTL_EI_MODE(1) |
> - GEN6_RC_CTL_HW_ENABLE);
> -
> - rc6vids = 0;
> - ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
> - if (IS_GEN6(dev_priv) && ret) {
> - DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
> - } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
> - DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
> - GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
> - rc6vids &= 0xffff00;
> - rc6vids |= GEN6_ENCODE_RC6_VID(450);
> - ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
> - if (ret)
> - DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
> - }
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void gen6_enable_rps(struct drm_i915_private *dev_priv)
> -{
> - /* Here begins a magic sequence of register writes to enable
> - * auto-downclocking.
> - *
> - * Perhaps there might be some value in exposing these to
> - * userspace...
> - */
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* Power down if completely idle for over 50ms */
> - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
> - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
> -
> - reset_rps(dev_priv, gen6_set_rps);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - int min_freq = 15;
> - unsigned int gpu_freq;
> - unsigned int max_ia_freq, min_ring_freq;
> - unsigned int max_gpu_freq, min_gpu_freq;
> - int scaling_factor = 180;
> - struct cpufreq_policy *policy;
> -
> - lockdep_assert_held(&rps->lock);
> -
> - policy = cpufreq_cpu_get(0);
> - if (policy) {
> - max_ia_freq = policy->cpuinfo.max_freq;
> - cpufreq_cpu_put(policy);
> - } else {
> - /*
> - * Default to measured freq if none found, PCU will ensure we
> - * don't go over
> - */
> - max_ia_freq = tsc_khz;
> - }
> -
> - /* Convert from kHz to MHz */
> - max_ia_freq /= 1000;
> -
> - min_ring_freq = I915_READ(DCLK) & 0xf;
> - /* convert DDR frequency from units of 266.6MHz to bandwidth */
> - min_ring_freq = mult_frac(min_ring_freq, 8, 3);
> -
> - min_gpu_freq = rps->min_freq;
> - max_gpu_freq = rps->max_freq;
> - if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
> - /* Convert GT frequency to 50 HZ units */
> - min_gpu_freq /= GEN9_FREQ_SCALER;
> - max_gpu_freq /= GEN9_FREQ_SCALER;
> - }
> -
> - /*
> - * For each potential GPU frequency, load a ring frequency we'd like
> - * to use for memory access. We do this by specifying the IA frequency
> - * the PCU should use as a reference to determine the ring frequency.
> - */
> - for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
> - int diff = max_gpu_freq - gpu_freq;
> - unsigned int ia_freq = 0, ring_freq = 0;
> -
> - if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
> - /*
> - * ring_freq = 2 * GT. ring_freq is in 100MHz units
> - * No floor required for ring frequency on SKL.
> - */
> - ring_freq = gpu_freq;
> - } else if (INTEL_GEN(dev_priv) >= 8) {
> - /* max(2 * GT, DDR). NB: GT is 50MHz units */
> - ring_freq = max(min_ring_freq, gpu_freq);
> - } else if (IS_HASWELL(dev_priv)) {
> - ring_freq = mult_frac(gpu_freq, 5, 4);
> - ring_freq = max(min_ring_freq, ring_freq);
> - /* leave ia_freq as the default, chosen by cpufreq */
> - } else {
> - /* On older processors, there is no separate ring
> - * clock domain, so in order to boost the bandwidth
> - * of the ring, we need to upclock the CPU (ia_freq).
> - *
> - * For GPU frequencies less than 750MHz,
> - * just use the lowest ring freq.
> - */
> - if (gpu_freq < min_freq)
> - ia_freq = 800;
> - else
> - ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
> - ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
> - }
> -
> - sandybridge_pcode_write(dev_priv,
> - GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
> - ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
> - ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
> - gpu_freq);
> - }
> -}
> -
> -static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val, rp0;
> -
> - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
> -
> - switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
> - case 8:
> - /* (2 * 4) config */
> - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
> - break;
> - case 12:
> - /* (2 * 6) config */
> - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
> - break;
> - case 16:
> - /* (2 * 8) config */
> - default:
> - /* Setting (2 * 8) Min RP0 for any other combination */
> - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
> - break;
> - }
> -
> - rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
> -
> - return rp0;
> -}
> -
> -static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val, rpe;
> -
> - val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
> - rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
> -
> - return rpe;
> -}
> -
> -static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val, rp1;
> -
> - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
> - rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
> -
> - return rp1;
> -}
> -
> -static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val, rpn;
> -
> - val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
> - rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
> - FB_GFX_FREQ_FUSE_MASK);
> -
> - return rpn;
> -}
> -
> -static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val, rp1;
> -
> - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
> -
> - rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
> -
> - return rp1;
> -}
> -
> -static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val, rp0;
> -
> - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
> -
> - rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
> - /* Clamp to max */
> - rp0 = min_t(u32, rp0, 0xea);
> -
> - return rp0;
> -}
> -
> -static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val, rpe;
> -
> - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
> - rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
> - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
> - rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
> -
> - return rpe;
> -}
> -
> -static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
> -{
> - u32 val;
> -
> - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
> - /*
> - * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
> - * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
> - * a BYT-M B0 the above register contains 0xbf. Moreover when setting
> - * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
> - * to make sure it matches what Punit accepts.
> - */
> - return max_t(u32, val, 0xc0);
> -}
> -
> -/* Check that the pctx buffer wasn't move under us. */
> -static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
> -{
> - unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
> -
> - WARN_ON(pctx_addr != dev_priv->dsm.start +
> - dev_priv->vlv_pctx->stolen->start);
> -}
> -
> -
> -/* Check that the pcbr address is not empty. */
> -static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
> -{
> - unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
> -
> - WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
> -}
> -
> -static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
> -{
> - resource_size_t pctx_paddr, paddr;
> - resource_size_t pctx_size = 32*1024;
> - u32 pcbr;
> -
> - pcbr = I915_READ(VLV_PCBR);
> - if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
> - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
> - paddr = dev_priv->dsm.end + 1 - pctx_size;
> - GEM_BUG_ON(paddr > U32_MAX);
> -
> - pctx_paddr = (paddr & (~4095));
> - I915_WRITE(VLV_PCBR, pctx_paddr);
> - }
> -
> - DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
> -}
> -
> -static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
> -{
> - struct drm_i915_gem_object *pctx;
> - resource_size_t pctx_paddr;
> - resource_size_t pctx_size = 24*1024;
> - u32 pcbr;
> -
> - pcbr = I915_READ(VLV_PCBR);
> - if (pcbr) {
> - /* BIOS set it up already, grab the pre-alloc'd space */
> - resource_size_t pcbr_offset;
> -
> - pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
> - pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
> - pcbr_offset,
> - I915_GTT_OFFSET_NONE,
> - pctx_size);
> - goto out;
> - }
> -
> - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
> -
> - /*
> - * From the Gunit register HAS:
> - * The Gfx driver is expected to program this register and ensure
> - * proper allocation within Gfx stolen memory. For example, this
> - * register should be programmed such than the PCBR range does not
> - * overlap with other ranges, such as the frame buffer, protected
> - * memory, or any other relevant ranges.
> - */
> - pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
> - if (!pctx) {
> - DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
> - goto out;
> - }
> -
> - GEM_BUG_ON(range_overflows_t(u64,
> - dev_priv->dsm.start,
> - pctx->stolen->start,
> - U32_MAX));
> - pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
> - I915_WRITE(VLV_PCBR, pctx_paddr);
> -
> -out:
> - DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
> - dev_priv->vlv_pctx = pctx;
> -}
> -
> -static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
> -{
> - if (WARN_ON(!dev_priv->vlv_pctx))
> - return;
> -
> - i915_gem_object_put(dev_priv->vlv_pctx);
> - dev_priv->vlv_pctx = NULL;
> -}
> -
> -static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
> -{
> - dev_priv->gt_pm.rps.gpll_ref_freq =
> - vlv_get_cck_clock(dev_priv, "GPLL ref",
> - CCK_GPLL_CLOCK_CONTROL,
> - dev_priv->czclk_freq);
> -
> - DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
> - dev_priv->gt_pm.rps.gpll_ref_freq);
> -}
> -
> -static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - u32 val;
> -
> - valleyview_setup_pctx(dev_priv);
> -
> - vlv_iosf_sb_get(dev_priv,
> - BIT(VLV_IOSF_SB_PUNIT) |
> - BIT(VLV_IOSF_SB_NC) |
> - BIT(VLV_IOSF_SB_CCK));
> -
> - vlv_init_gpll_ref_freq(dev_priv);
> -
> - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
> - switch ((val >> 6) & 3) {
> - case 0:
> - case 1:
> - dev_priv->mem_freq = 800;
> - break;
> - case 2:
> - dev_priv->mem_freq = 1066;
> - break;
> - case 3:
> - dev_priv->mem_freq = 1333;
> - break;
> - }
> - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
> -
> - rps->max_freq = valleyview_rps_max_freq(dev_priv);
> - rps->rp0_freq = rps->max_freq;
> - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->max_freq),
> - rps->max_freq);
> -
> - rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
> - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->efficient_freq),
> - rps->efficient_freq);
> -
> - rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
> - DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->rp1_freq),
> - rps->rp1_freq);
> -
> - rps->min_freq = valleyview_rps_min_freq(dev_priv);
> - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->min_freq),
> - rps->min_freq);
> -
> - vlv_iosf_sb_put(dev_priv,
> - BIT(VLV_IOSF_SB_PUNIT) |
> - BIT(VLV_IOSF_SB_NC) |
> - BIT(VLV_IOSF_SB_CCK));
> -}
> -
> -static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> - u32 val;
> -
> - cherryview_setup_pctx(dev_priv);
> -
> - vlv_iosf_sb_get(dev_priv,
> - BIT(VLV_IOSF_SB_PUNIT) |
> - BIT(VLV_IOSF_SB_NC) |
> - BIT(VLV_IOSF_SB_CCK));
> -
> - vlv_init_gpll_ref_freq(dev_priv);
> -
> - val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
> -
> - switch ((val >> 2) & 0x7) {
> - case 3:
> - dev_priv->mem_freq = 2000;
> - break;
> - default:
> - dev_priv->mem_freq = 1600;
> - break;
> - }
> - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
> -
> - rps->max_freq = cherryview_rps_max_freq(dev_priv);
> - rps->rp0_freq = rps->max_freq;
> - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->max_freq),
> - rps->max_freq);
> -
> - rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
> - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->efficient_freq),
> - rps->efficient_freq);
> -
> - rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
> - DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->rp1_freq),
> - rps->rp1_freq);
> -
> - rps->min_freq = cherryview_rps_min_freq(dev_priv);
> - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
> - intel_gpu_freq(dev_priv, rps->min_freq),
> - rps->min_freq);
> -
> - vlv_iosf_sb_put(dev_priv,
> - BIT(VLV_IOSF_SB_PUNIT) |
> - BIT(VLV_IOSF_SB_NC) |
> - BIT(VLV_IOSF_SB_CCK));
> -
> - WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
> - rps->min_freq) & 1,
> - "Odd GPU freq values\n");
> -}
> -
> -static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - valleyview_cleanup_pctx(dev_priv);
> -}
> -
> -static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
> -{
> - struct intel_engine_cs *engine;
> - enum intel_engine_id id;
> - u32 gtfifodbg, rc6_mode, pcbr;
> -
> - gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
> - GT_FIFO_FREE_ENTRIES_CHV);
> - if (gtfifodbg) {
> - DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
> - gtfifodbg);
> - I915_WRITE(GTFIFODBG, gtfifodbg);
> - }
> -
> - cherryview_check_pctx(dev_priv);
> -
> - /* 1a & 1b: Get forcewake during program sequence. Although the driver
> - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* Disable RC states. */
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -
> - /* 2a: Program RC6 thresholds.*/
> - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
> - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
> - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
> -
> - for_each_engine(engine, dev_priv, id)
> - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
> - I915_WRITE(GEN6_RC_SLEEP, 0);
> -
> - /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
> - I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
> -
> - /* Allows RC6 residency counter to work */
> - I915_WRITE(VLV_COUNTER_CONTROL,
> - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
> - VLV_MEDIA_RC6_COUNT_EN |
> - VLV_RENDER_RC6_COUNT_EN));
> -
> - /* For now we assume BIOS is allocating and populating the PCBR */
> - pcbr = I915_READ(VLV_PCBR);
> -
> - /* 3: Enable RC6 */
> - rc6_mode = 0;
> - if (pcbr >> VLV_PCBR_ADDR_SHIFT)
> - rc6_mode = GEN7_RC_CTL_TO_MODE;
> - I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
> -{
> - u32 val;
> -
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* 1: Program defaults and thresholds for RPS*/
> - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
> - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
> - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
> - I915_WRITE(GEN6_RP_UP_EI, 66000);
> - I915_WRITE(GEN6_RP_DOWN_EI, 350000);
> -
> - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
> -
> - /* 2: Enable RPS */
> - I915_WRITE(GEN6_RP_CONTROL,
> - GEN6_RP_MEDIA_HW_NORMAL_MODE |
> - GEN6_RP_MEDIA_IS_GFX |
> - GEN6_RP_ENABLE |
> - GEN6_RP_UP_BUSY_AVG |
> - GEN6_RP_DOWN_IDLE_AVG);
> -
> - /* Setting Fixed Bias */
> - vlv_punit_get(dev_priv);
> -
> - val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
> - vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
> -
> - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
> -
> - vlv_punit_put(dev_priv);
> -
> - /* RPS code assumes GPLL is used */
> - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
> -
> - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
> - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
> -
> - reset_rps(dev_priv, valleyview_set_rps);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
> -{
> - struct intel_engine_cs *engine;
> - enum intel_engine_id id;
> - u32 gtfifodbg;
> -
> - valleyview_check_pctx(dev_priv);
> -
> - gtfifodbg = I915_READ(GTFIFODBG);
> - if (gtfifodbg) {
> - DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
> - gtfifodbg);
> - I915_WRITE(GTFIFODBG, gtfifodbg);
> - }
> -
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - /* Disable RC states. */
> - I915_WRITE(GEN6_RC_CONTROL, 0);
> -
> - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
> - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
> - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
> -
> - for_each_engine(engine, dev_priv, id)
> - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
> -
> - I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
> -
> - /* Allows RC6 residency counter to work */
> - I915_WRITE(VLV_COUNTER_CONTROL,
> - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
> - VLV_MEDIA_RC0_COUNT_EN |
> - VLV_RENDER_RC0_COUNT_EN |
> - VLV_MEDIA_RC6_COUNT_EN |
> - VLV_RENDER_RC6_COUNT_EN));
> -
> - I915_WRITE(GEN6_RC_CONTROL,
> - GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
> -{
> - u32 val;
> -
> - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> -
> - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
> - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
> - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
> - I915_WRITE(GEN6_RP_UP_EI, 66000);
> - I915_WRITE(GEN6_RP_DOWN_EI, 350000);
> -
> - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
> -
> - I915_WRITE(GEN6_RP_CONTROL,
> - GEN6_RP_MEDIA_TURBO |
> - GEN6_RP_MEDIA_HW_NORMAL_MODE |
> - GEN6_RP_MEDIA_IS_GFX |
> - GEN6_RP_ENABLE |
> - GEN6_RP_UP_BUSY_AVG |
> - GEN6_RP_DOWN_IDLE_CONT);
> -
> - vlv_punit_get(dev_priv);
> -
> - /* Setting Fixed Bias */
> - val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
> - vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
> -
> - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
> -
> - vlv_punit_put(dev_priv);
> -
> - /* RPS code assumes GPLL is used */
> - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
> -
> - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
> - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
> -
> - reset_rps(dev_priv, valleyview_set_rps);
> -
> - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -}
> -
> -static unsigned long intel_pxfreq(u32 vidfreq)
> -{
> - unsigned long freq;
> - int div = (vidfreq & 0x3f0000) >> 16;
> - int post = (vidfreq & 0x3000) >> 12;
> - int pre = (vidfreq & 0x7);
> -
> - if (!pre)
> - return 0;
> -
> - freq = ((div * 133333) / ((1<<post) * pre));
> -
> - return freq;
> -}
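Not related to the code movement, but since intel_pxfreq() is pure bit
twiddling it may help first-time readers to see the decode worked through
once. A throwaway userspace sketch of the same maths, with an invented
vidfreq value (the field layout is the one used above, everything else is
my assumption):

	#include <stdio.h>

	/* same decode as intel_pxfreq(): divider in bits [21:16], post in [13:12], pre in [2:0] */
	static unsigned long pxfreq(unsigned int vidfreq)
	{
		int div = (vidfreq & 0x3f0000) >> 16;
		int post = (vidfreq & 0x3000) >> 12;
		int pre = vidfreq & 0x7;

		if (!pre)
			return 0;	/* unprogrammed entry, avoid the divide-by-zero */

		return (div * 133333) / ((1 << post) * pre);
	}

	int main(void)
	{
		/* invented vidfreq: div = 12, post = 1, pre = 1 -> 12 * 133333 / 2 = 799998 */
		printf("%lu\n", pxfreq(0x000c1001));
		return 0;
	}

If the 133333 base is in kHz, that example lands at roughly 800 MHz, which
is at least in the right ballpark for these parts.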
> -
> -static const struct cparams {
> - u16 i;
> - u16 t;
> - u16 m;
> - u16 c;
> -} cparams[] = {
> - { 1, 1333, 301, 28664 },
> - { 1, 1066, 294, 24460 },
> - { 1, 800, 294, 25192 },
> - { 0, 1333, 276, 27605 },
> - { 0, 1066, 276, 27605 },
> - { 0, 800, 231, 23784 },
> -};
> -
> -static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
> -{
> - u64 total_count, diff, ret;
> - u32 count1, count2, count3, m = 0, c = 0;
> - unsigned long now = jiffies_to_msecs(jiffies), diff1;
> - int i;
> -
> - lockdep_assert_held(&mchdev_lock);
> -
> - diff1 = now - dev_priv->ips.last_time1;
> -
> - /* Prevent division-by-zero if we are asking too fast.
> - * Also, we don't get interesting results if we are polling
> - * faster than once in 10ms, so just return the saved value
> - * in such cases.
> - */
> - if (diff1 <= 10)
> - return dev_priv->ips.chipset_power;
> -
> - count1 = I915_READ(DMIEC);
> - count2 = I915_READ(DDREC);
> - count3 = I915_READ(CSIEC);
> -
> - total_count = count1 + count2 + count3;
> -
> - /* FIXME: handle per-counter overflow */
> - if (total_count < dev_priv->ips.last_count1) {
> - diff = ~0UL - dev_priv->ips.last_count1;
> - diff += total_count;
> - } else {
> - diff = total_count - dev_priv->ips.last_count1;
> - }
> -
> - for (i = 0; i < ARRAY_SIZE(cparams); i++) {
> - if (cparams[i].i == dev_priv->ips.c_m &&
> - cparams[i].t == dev_priv->ips.r_t) {
> - m = cparams[i].m;
> - c = cparams[i].c;
> - break;
> - }
> - }
> -
> - diff = div_u64(diff, diff1);
> - ret = ((m * diff) + c);
> - ret = div_u64(ret, 10);
> -
> - dev_priv->ips.last_count1 = total_count;
> - dev_priv->ips.last_time1 = now;
> -
> - dev_priv->ips.chipset_power = ret;
> -
> - return ret;
> -}
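For what it's worth, the chipset power estimate boils down to a linear fit
on the event-counter rate, guarded by the 10 ms minimum polling interval
above. A tiny standalone sketch of that arithmetic, with made-up
counter/time deltas and the m/c pair from the first cparams entry:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* made-up numbers: 500000 counts in 100 ms; m/c from the { 1, 1333, 301, 28664 } entry */
		uint64_t count_delta = 500000;
		uint64_t ms_delta = 100;
		uint64_t m = 301, c = 28664;

		/* mirrors __i915_chipset_val(): rate in counts/ms, estimate = (m * rate + c) / 10 */
		uint64_t rate = count_delta / ms_delta;
		uint64_t power = (m * rate + c) / 10;

		printf("chipset power estimate: %llu\n", (unsigned long long)power);
		return 0;
	}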
> -
> -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
> -{
> - unsigned long val;
> -
> - if (!IS_GEN5(dev_priv))
> - return 0;
> -
> - intel_runtime_pm_get(dev_priv);
> - spin_lock_irq(&mchdev_lock);
> -
> - val = __i915_chipset_val(dev_priv);
> -
> - spin_unlock_irq(&mchdev_lock);
> - intel_runtime_pm_put(dev_priv);
> -
> - return val;
> -}
> -
> -unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
> -{
> - unsigned long m, x, b;
> - u32 tsfs;
> -
> - tsfs = I915_READ(TSFS);
> -
> - m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
> - x = I915_READ8(TR1);
> -
> - b = tsfs & TSFS_INTR_MASK;
> -
> - return ((m * x) / 127) - b;
> -}
> -
> -static int _pxvid_to_vd(u8 pxvid)
> -{
> - if (pxvid == 0)
> - return 0;
> -
> - if (pxvid >= 8 && pxvid < 31)
> - pxvid = 31;
> -
> - return (pxvid + 2) * 125;
> -}
> -
> -static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
> -{
> - const int vd = _pxvid_to_vd(pxvid);
> - const int vm = vd - 1125;
> -
> - if (INTEL_INFO(dev_priv)->is_mobile)
> - return vm > 0 ? vm : 0;
> -
> - return vd;
> -}
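Small reading aid: the PXVID decode clamps codes 8..30 up to 31 and, on
mobile parts, subtracts a fixed 1125 and floors at zero. Mirrored in
userspace with a couple of invented codes:

	#include <stdio.h>

	/* same mapping as _pxvid_to_vd()/pvid_to_extvid(), mobile variant */
	static int pxvid_to_extvid_mobile(unsigned char pxvid)
	{
		int vd, vm;

		if (pxvid == 0)
			return 0;
		if (pxvid >= 8 && pxvid < 31)
			pxvid = 31;		/* codes 8..30 clamp up to 31 */

		vd = (pxvid + 2) * 125;
		vm = vd - 1125;			/* mobile parts subtract a fixed offset */
		return vm > 0 ? vm : 0;
	}

	int main(void)
	{
		/* invented codes: 0x10 clamps to 31 -> (31+2)*125 - 1125 = 3000,
		 * 0x28 (40) is above the clamp window -> (40+2)*125 - 1125 = 4125 */
		printf("%d %d\n", pxvid_to_extvid_mobile(0x10), pxvid_to_extvid_mobile(0x28));
		return 0;
	}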
> -
> -static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
> -{
> - u64 now, diff, diffms;
> - u32 count;
> -
> - lockdep_assert_held(&mchdev_lock);
> -
> - now = ktime_get_raw_ns();
> - diffms = now - dev_priv->ips.last_time2;
> - do_div(diffms, NSEC_PER_MSEC);
> -
> - /* Don't divide by 0 */
> - if (!diffms)
> - return;
> -
> - count = I915_READ(GFXEC);
> -
> - if (count < dev_priv->ips.last_count2) {
> - diff = ~0UL - dev_priv->ips.last_count2;
> - diff += count;
> - } else {
> - diff = count - dev_priv->ips.last_count2;
> - }
> -
> - dev_priv->ips.last_count2 = count;
> - dev_priv->ips.last_time2 = now;
> -
> - /* More magic constants... */
> - diff = diff * 1181;
> - diff = div_u64(diff, diffms * 10);
> - dev_priv->ips.gfx_power = diff;
> -}
> -
> -void i915_update_gfx_val(struct drm_i915_private *dev_priv)
> -{
> - if (!IS_GEN5(dev_priv))
> - return;
> -
> - intel_runtime_pm_get(dev_priv);
> - spin_lock_irq(&mchdev_lock);
> -
> - __i915_update_gfx_val(dev_priv);
> -
> - spin_unlock_irq(&mchdev_lock);
> - intel_runtime_pm_put(dev_priv);
> -}
> -
> -static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
> -{
> - unsigned long t, corr, state1, corr2, state2;
> - u32 pxvid, ext_v;
> -
> - lockdep_assert_held(&mchdev_lock);
> -
> - pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
> - pxvid = (pxvid >> 24) & 0x7f;
> - ext_v = pvid_to_extvid(dev_priv, pxvid);
> -
> - state1 = ext_v;
> -
> - t = i915_mch_val(dev_priv);
> -
> - /* Revel in the empirically derived constants */
> -
> - /* Correction factor in 1/100000 units */
> - if (t > 80)
> - corr = ((t * 2349) + 135940);
> - else if (t >= 50)
> - corr = ((t * 964) + 29317);
> - else /* < 50 */
> - corr = ((t * 301) + 1004);
> -
> - corr = corr * ((150142 * state1) / 10000 - 78642);
> - corr /= 100000;
> - corr2 = (corr * dev_priv->ips.corr);
> -
> - state2 = (corr2 * state1) / 10000;
> - state2 /= 100; /* convert to mW */
> -
> - __i915_update_gfx_val(dev_priv);
> -
> - return dev_priv->ips.gfx_power + state2;
> -}
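This is the function that always takes me a second read, so here is the
same arithmetic in throwaway userspace form. All three inputs (t, state1,
the LCFUSE correction) are invented purely to show how the pieces combine;
they are not real readings from any part:

	#include <stdio.h>

	int main(void)
	{
		/* invented inputs: t as returned by i915_mch_val(), state1 from the
		 * PXVID decode, lcfuse as the ips.corr fuse value -- none are real */
		unsigned long long t = 60, state1 = 6000, lcfuse = 100;
		unsigned long long corr, corr2, state2;

		/* piecewise correction factor, in 1/100000 units, as in __i915_gfx_val() */
		if (t > 80)
			corr = t * 2349 + 135940;
		else if (t >= 50)
			corr = t * 964 + 29317;
		else
			corr = t * 301 + 1004;

		corr = corr * ((150142 * state1) / 10000 - 78642);
		corr /= 100000;
		corr2 = corr * lcfuse;

		state2 = (corr2 * state1) / 10000;
		state2 /= 100;			/* convert to mW */

		printf("state2 = %llu mW\n", state2);	/* 5983 with these inputs */
		return 0;
	}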
> -
> -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
> -{
> - unsigned long val;
> -
> - if (!IS_GEN5(dev_priv))
> - return 0;
> -
> - intel_runtime_pm_get(dev_priv);
> - spin_lock_irq(&mchdev_lock);
> -
> - val = __i915_gfx_val(dev_priv);
> -
> - spin_unlock_irq(&mchdev_lock);
> - intel_runtime_pm_put(dev_priv);
> -
> - return val;
> -}
> -
> -static struct drm_i915_private *i915_mch_dev;
> -
> -static struct drm_i915_private *mchdev_get(void)
> -{
> - struct drm_i915_private *i915;
> -
> - rcu_read_lock();
> - i915 = i915_mch_dev;
> - if (!kref_get_unless_zero(&i915->drm.ref))
[Sagar: shouldn't this be "if (i915 && !kref_get_unless_zero(...))"? If intel_ips calls in before i915_mch_dev is set we dereference NULL here.]
> - i915 = NULL;
> - rcu_read_unlock();
> -
> - return i915;
> -}
> -
> -/**
> - * i915_read_mch_val - return value for IPS use
> - *
> - * Calculate and return a value for the IPS driver to use when deciding whether
> - * we have thermal and power headroom to increase CPU or GPU power budget.
> - */
> -unsigned long i915_read_mch_val(void)
> -{
> - struct drm_i915_private *i915;
> - unsigned long chipset_val, graphics_val;
> -
> - i915 = mchdev_get();
> - if (!i915)
> - return 0;
> -
> - intel_runtime_pm_get(i915);
> - spin_lock_irq(&mchdev_lock);
> - chipset_val = __i915_chipset_val(i915);
> - graphics_val = __i915_gfx_val(i915);
> - spin_unlock_irq(&mchdev_lock);
> - intel_runtime_pm_put(i915);
> -
> - drm_dev_put(&i915->drm);
> - return chipset_val + graphics_val;
> -}
> -EXPORT_SYMBOL_GPL(i915_read_mch_val);
> -
> -/**
> - * i915_gpu_raise - raise GPU frequency limit
> - *
> - * Raise the limit; IPS indicates we have thermal headroom.
> - */
> -bool i915_gpu_raise(void)
> -{
> - struct drm_i915_private *i915;
> -
> - i915 = mchdev_get();
> - if (!i915)
> - return false;
> -
> - spin_lock_irq(&mchdev_lock);
> - if (i915->ips.max_delay > i915->ips.fmax)
> - i915->ips.max_delay--;
> - spin_unlock_irq(&mchdev_lock);
> -
> - drm_dev_put(&i915->drm);
> - return true;
> -}
> -EXPORT_SYMBOL_GPL(i915_gpu_raise);
> -
> -/**
> - * i915_gpu_lower - lower GPU frequency limit
> - *
> - * IPS indicates we're close to a thermal limit, so throttle back the GPU
> - * frequency maximum.
> - */
> -bool i915_gpu_lower(void)
> -{
> - struct drm_i915_private *i915;
> -
> - i915 = mchdev_get();
> - if (!i915)
> - return false;
> -
> - spin_lock_irq(&mchdev_lock);
> - if (i915->ips.max_delay < i915->ips.min_delay)
> - i915->ips.max_delay++;
> - spin_unlock_irq(&mchdev_lock);
> -
> - drm_dev_put(&i915->drm);
> - return true;
> -}
> -EXPORT_SYMBOL_GPL(i915_gpu_lower);
> -
> -/**
> - * i915_gpu_busy - indicate GPU business to IPS
> - *
> - * Tell the IPS driver whether or not the GPU is busy.
> - */
> -bool i915_gpu_busy(void)
> -{
> - struct drm_i915_private *i915;
> - bool ret;
> -
> - i915 = mchdev_get();
> - if (!i915)
> - return false;
> -
> - ret = i915->gt.awake;
> -
> - drm_dev_put(&i915->drm);
> - return ret;
> -}
> -EXPORT_SYMBOL_GPL(i915_gpu_busy);
> -
> -/**
> - * i915_gpu_turbo_disable - disable graphics turbo
> - *
> - * Disable graphics turbo by resetting the max frequency and setting the
> - * current frequency to the default.
> - */
> -bool i915_gpu_turbo_disable(void)
> -{
> - struct drm_i915_private *i915;
> - bool ret;
> -
> - i915 = mchdev_get();
> - if (!i915)
> - return false;
> -
> - spin_lock_irq(&mchdev_lock);
> - i915->ips.max_delay = i915->ips.fstart;
> - ret = ironlake_set_drps(i915, i915->ips.fstart);
> - spin_unlock_irq(&mchdev_lock);
> -
> - drm_dev_put(&i915->drm);
> - return ret;
> -}
> -EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
> -
> -/**
> - * Tells the intel_ips driver that the i915 driver is now loaded, if
> - * IPS got loaded first.
> - *
> - * This awkward dance is so that neither module has to depend on the
> - * other in order for IPS to do the appropriate communication of
> - * GPU turbo limits to i915.
> - */
> -static void
> -ips_ping_for_i915_load(void)
> -{
> - void (*link)(void);
> -
> - link = symbol_get(ips_link_to_i915_driver);
> - if (link) {
> - link();
> - symbol_put(ips_link_to_i915_driver);
> - }
> -}
> -
> -void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
> -{
> - /* We only register the i915 ips part with intel-ips once everything is
> - * set up, to avoid intel-ips sneaking in and reading bogus values. */
> - smp_store_mb(i915_mch_dev, dev_priv);
> -
> - ips_ping_for_i915_load();
> -}
> -
> -void intel_gpu_ips_teardown(void)
> -{
> - smp_store_mb(i915_mch_dev, NULL);
> -}
> -
> -static void intel_init_emon(struct drm_i915_private *dev_priv)
> -{
> - u32 lcfuse;
> - u8 pxw[16];
> - int i;
> -
> - /* Disable to program */
> - I915_WRITE(ECR, 0);
> - POSTING_READ(ECR);
> -
> - /* Program energy weights for various events */
> - I915_WRITE(SDEW, 0x15040d00);
> - I915_WRITE(CSIEW0, 0x007f0000);
> - I915_WRITE(CSIEW1, 0x1e220004);
> - I915_WRITE(CSIEW2, 0x04000004);
> -
> - for (i = 0; i < 5; i++)
> - I915_WRITE(PEW(i), 0);
> - for (i = 0; i < 3; i++)
> - I915_WRITE(DEW(i), 0);
> -
> - /* Program P-state weights to account for frequency power adjustment */
> - for (i = 0; i < 16; i++) {
> - u32 pxvidfreq = I915_READ(PXVFREQ(i));
> - unsigned long freq = intel_pxfreq(pxvidfreq);
> - unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
> - PXVFREQ_PX_SHIFT;
> - unsigned long val;
> -
> - val = vid * vid;
> - val *= (freq / 1000);
> - val *= 255;
> - val /= (127*127*900);
> - if (val > 0xff)
> - DRM_ERROR("bad pxval: %ld\n", val);
> - pxw[i] = val;
> - }
> - /* Render standby states get 0 weight */
> - pxw[14] = 0;
> - pxw[15] = 0;
> -
> - for (i = 0; i < 4; i++) {
> - u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
> - (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
> - I915_WRITE(PXW(i), val);
> - }
> -
> - /* Adjust magic regs to magic values (more experimental results) */
> - I915_WRITE(OGW0, 0);
> - I915_WRITE(OGW1, 0);
> - I915_WRITE(EG0, 0x00007f00);
> - I915_WRITE(EG1, 0x0000000e);
> - I915_WRITE(EG2, 0x000e0000);
> - I915_WRITE(EG3, 0x68000300);
> - I915_WRITE(EG4, 0x42000000);
> - I915_WRITE(EG5, 0x00140031);
> - I915_WRITE(EG6, 0);
> - I915_WRITE(EG7, 0);
> -
> - for (i = 0; i < 8; i++)
> - I915_WRITE(PXWL(i), 0);
> -
> - /* Enable PMON + select events */
> - I915_WRITE(ECR, 0x80000019);
> -
> - lcfuse = I915_READ(LCFUSE02);
> -
> - dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
> -}
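Since the PXW programming is all magic constants, here is roughly what one
weight works out to. The vid/freq pair is invented, not read from a real
PXVFREQ entry; the point is just the weight formula and the four-per-register
packing:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* invented decode of one PXVFREQ entry: vid code 50, ~800000 from intel_pxfreq() */
		unsigned long vid = 50, freq = 800000;
		unsigned long val;
		uint8_t pxw[4];
		uint32_t packed;

		/* same weight formula as intel_init_emon() */
		val = vid * vid;
		val *= freq / 1000;
		val *= 255;
		val /= 127 * 127 * 900;
		pxw[0] = val;		/* 50*50*800*255 / (127*127*900) = 35, well under 0xff */

		/* the driver then packs four 8-bit weights per PXW register, MSB first */
		pxw[1] = pxw[2] = pxw[3] = pxw[0];
		packed = (uint32_t)pxw[0] << 24 | pxw[1] << 16 | pxw[2] << 8 | pxw[3];

		printf("weight %lu, packed 0x%08x\n", val, packed);
		return 0;
	}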
> -
> -void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - /*
> - * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
> - * requirement.
> - */
> - if (!sanitize_rc6(dev_priv)) {
> - DRM_INFO("RC6 disabled, disabling runtime PM support\n");
> - intel_runtime_pm_get(dev_priv);
> - }
> -
> - mutex_lock(&rps->lock);
> -
> - /* Initialize RPS limits (for userspace) */
> - if (IS_CHERRYVIEW(dev_priv))
> - cherryview_init_gt_powersave(dev_priv);
> - else if (IS_VALLEYVIEW(dev_priv))
> - valleyview_init_gt_powersave(dev_priv);
> - else if (INTEL_GEN(dev_priv) >= 6)
> - gen6_init_rps_frequencies(dev_priv);
> -
> - /* Derive initial user preferences/limits from the hardware limits */
> - rps->idle_freq = rps->min_freq;
> - rps->cur_freq = rps->idle_freq;
> -
> - rps->max_freq_softlimit = rps->max_freq;
> - rps->min_freq_softlimit = rps->min_freq;
> -
> - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
> - rps->min_freq_softlimit =
> - max_t(int,
> - rps->efficient_freq,
> - intel_freq_opcode(dev_priv, 450));
> -
> - /* After setting max-softlimit, find the overclock max freq */
> - if (IS_GEN6(dev_priv) ||
> - IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
> - u32 params = 0;
> -
> - sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
> - if (params & BIT(31)) { /* OC supported */
> - DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
> - (rps->max_freq & 0xff) * 50,
> - (params & 0xff) * 50);
> - rps->max_freq = params & 0xff;
> - }
> - }
> -
> - /* Finally allow us to boost to max by default */
> - rps->boost_freq = rps->max_freq;
> -
> - mutex_unlock(&rps->lock);
> -}
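One more reading aid: the GEN6_READ_OC_PARAMS decode above treats bit 31 as
"overclocking supported" and the low byte as the overclock limit in 50 MHz
opcode units. A trivial userspace mirror of that decode, with an invented
pcode reply and an invented hardware max:

	#include <stdio.h>

	int main(void)
	{
		/* invented pcode reply: bit 31 set, low byte 0x20 -> 32 * 50 = 1600 MHz OC limit */
		unsigned int params = 0x80000020u;
		unsigned int max_freq = 0x16;	/* invented hardware max opcode, 22 * 50 = 1100 MHz */

		if (params & (1u << 31)) {	/* OC supported */
			printf("Overclocking supported, max: %u MHz, overclock: %u MHz\n",
			       (max_freq & 0xff) * 50, (params & 0xff) * 50);
			max_freq = params & 0xff;	/* the driver bumps max_freq to the OC opcode */
		}

		printf("final max_freq opcode: 0x%02x\n", max_freq);
		return 0;
	}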
> -
> -void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - if (IS_VALLEYVIEW(dev_priv))
> - valleyview_cleanup_gt_powersave(dev_priv);
> -
> - if (!HAS_RC6(dev_priv))
> - intel_runtime_pm_put(dev_priv);
> -}
> -
> -/**
> - * intel_suspend_gt_powersave - suspend PM work and helper threads
> - * @dev_priv: i915 device
> - *
> - * We don't want to disable RC6 or other features here, we just want
> - * to make sure any work we've queued has finished and won't bother
> - * us while we're suspended.
> - */
> -void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - if (INTEL_GEN(dev_priv) < 6)
> - return;
> -
> - /* gen6_rps_idle() will be called later to disable interrupts */
> -}
> -
> -void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
> - dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
> - intel_disable_gt_powersave(dev_priv);
> -
> - if (INTEL_GEN(dev_priv) < 11)
> - gen6_reset_rps_interrupts(dev_priv);
> - else
> - WARN_ON_ONCE(1);
> -}
> -
> -static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
> -{
> - lockdep_assert_held(&i915->gt_pm.rps.lock);
> -
> - if (!i915->gt_pm.llc_pstate.enabled)
> - return;
> -
> - /* Currently there is no HW configuration to be done to disable. */
> -
> - i915->gt_pm.llc_pstate.enabled = false;
> -}
> -
> -static void intel_disable_rc6(struct drm_i915_private *dev_priv)
> -{
> - lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
> -
> - if (!dev_priv->gt_pm.rc6.enabled)
> - return;
> -
> - if (INTEL_GEN(dev_priv) >= 9)
> - gen9_disable_rc6(dev_priv);
> - else if (IS_CHERRYVIEW(dev_priv))
> - cherryview_disable_rc6(dev_priv);
> - else if (IS_VALLEYVIEW(dev_priv))
> - valleyview_disable_rc6(dev_priv);
> - else if (INTEL_GEN(dev_priv) >= 6)
> - gen6_disable_rc6(dev_priv);
> -
> - dev_priv->gt_pm.rc6.enabled = false;
> -}
> -
> -static void intel_disable_rps(struct drm_i915_private *dev_priv)
> -{
> - lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
> -
> - if (!dev_priv->gt_pm.rps.enabled)
> - return;
> -
> - if (INTEL_GEN(dev_priv) >= 9)
> - gen9_disable_rps(dev_priv);
> - else if (IS_CHERRYVIEW(dev_priv))
> - cherryview_disable_rps(dev_priv);
> - else if (IS_VALLEYVIEW(dev_priv))
> - valleyview_disable_rps(dev_priv);
> - else if (INTEL_GEN(dev_priv) >= 6)
> - gen6_disable_rps(dev_priv);
> - else if (IS_IRONLAKE_M(dev_priv))
> - ironlake_disable_drps(dev_priv);
> -
> - dev_priv->gt_pm.rps.enabled = false;
> -}
> -
> -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - mutex_lock(&dev_priv->gt_pm.rps.lock);
> -
> - intel_disable_rc6(dev_priv);
> - intel_disable_rps(dev_priv);
> - if (HAS_LLC(dev_priv))
> - intel_disable_llc_pstate(dev_priv);
> -
> - mutex_unlock(&dev_priv->gt_pm.rps.lock);
> -}
> -
> -static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
> -{
> - lockdep_assert_held(&i915->gt_pm.rps.lock);
> -
> - if (i915->gt_pm.llc_pstate.enabled)
> - return;
> -
> - gen6_update_ring_freq(i915);
> -
> - i915->gt_pm.llc_pstate.enabled = true;
> -}
> -
> -static void intel_enable_rc6(struct drm_i915_private *dev_priv)
> -{
> - lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
> -
> - if (dev_priv->gt_pm.rc6.enabled)
> - return;
> -
> - if (IS_CHERRYVIEW(dev_priv))
> - cherryview_enable_rc6(dev_priv);
> - else if (IS_VALLEYVIEW(dev_priv))
> - valleyview_enable_rc6(dev_priv);
> - else if (INTEL_GEN(dev_priv) >= 9)
> - gen9_enable_rc6(dev_priv);
> - else if (IS_BROADWELL(dev_priv))
> - gen8_enable_rc6(dev_priv);
> - else if (INTEL_GEN(dev_priv) >= 6)
> - gen6_enable_rc6(dev_priv);
> -
> - dev_priv->gt_pm.rc6.enabled = true;
> -}
> -
> -static void intel_enable_rps(struct drm_i915_private *dev_priv)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - lockdep_assert_held(&rps->lock);
> -
> - if (rps->enabled)
> - return;
> -
> - if (IS_CHERRYVIEW(dev_priv)) {
> - cherryview_enable_rps(dev_priv);
> - } else if (IS_VALLEYVIEW(dev_priv)) {
> - valleyview_enable_rps(dev_priv);
> - } else if (WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11)) {
> - /* TODO */
> - } else if (INTEL_GEN(dev_priv) >= 9) {
> - gen9_enable_rps(dev_priv);
> - } else if (IS_BROADWELL(dev_priv)) {
> - gen8_enable_rps(dev_priv);
> - } else if (INTEL_GEN(dev_priv) >= 6) {
> - gen6_enable_rps(dev_priv);
> - } else if (IS_IRONLAKE_M(dev_priv)) {
> - ironlake_enable_drps(dev_priv);
> - intel_init_emon(dev_priv);
> - }
> -
> - WARN_ON(rps->max_freq < rps->min_freq);
> - WARN_ON(rps->idle_freq > rps->max_freq);
> -
> - WARN_ON(rps->efficient_freq < rps->min_freq);
> - WARN_ON(rps->efficient_freq > rps->max_freq);
> -
> - rps->enabled = true;
> -}
> -
> -void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
> -{
> - /* Powersaving is controlled by the host when inside a VM */
> - if (intel_vgpu_active(dev_priv))
> - return;
> -
> - mutex_lock(&dev_priv->gt_pm.rps.lock);
> -
> - if (HAS_RC6(dev_priv))
> - intel_enable_rc6(dev_priv);
> - intel_enable_rps(dev_priv);
> - if (HAS_LLC(dev_priv))
> - intel_enable_llc_pstate(dev_priv);
> -
> - mutex_unlock(&dev_priv->gt_pm.rps.lock);
> -}
> -
> -static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
> -{
> - /*
> - * On Ibex Peak and Cougar Point, we need to disable clock
> - * gating for the panel power sequencer or it will fail to
> - * start up when no ports are active.
> - */
> - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
> -}
> -
> -static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
> -{
> - enum pipe pipe;
> -
> - for_each_pipe(dev_priv, pipe) {
> - I915_WRITE(DSPCNTR(pipe),
> - I915_READ(DSPCNTR(pipe)) |
> - DISPPLANE_TRICKLE_FEED_DISABLE);
> -
> - I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
> - POSTING_READ(DSPSURF(pipe));
> - }
> -}
> -
> -static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
> -{
> - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
> -
> - /*
> - * Required for FBC
> - * WaFbcDisableDpfcClockGating:ilk
> - */
> - dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
> - ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
> - ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
> -
> - I915_WRITE(PCH_3DCGDIS0,
> - MARIUNIT_CLOCK_GATE_DISABLE |
> - SVSMUNIT_CLOCK_GATE_DISABLE);
> - I915_WRITE(PCH_3DCGDIS1,
> - VFMUNIT_CLOCK_GATE_DISABLE);
> -
> - /*
> - * According to the spec the following bits should be set in
> - * order to enable memory self-refresh
> - * The bit 22/21 of 0x42004
> - * The bit 5 of 0x42020
> - * The bit 15 of 0x45000
> - */
> - I915_WRITE(ILK_DISPLAY_CHICKEN2,
> - (I915_READ(ILK_DISPLAY_CHICKEN2) |
> - ILK_DPARB_GATE | ILK_VSDPFD_FULL));
> - dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
> - I915_WRITE(DISP_ARB_CTL,
> - (I915_READ(DISP_ARB_CTL) |
> - DISP_FBC_WM_DIS));
> -
> - /*
> - * Based on the document from hardware guys the following bits
> - * should be set unconditionally in order to enable FBC.
> - * The bit 22 of 0x42000
> - * The bit 22 of 0x42004
> - * The bit 7,8,9 of 0x42020.
> - */
> - if (IS_IRONLAKE_M(dev_priv)) {
> - /* WaFbcAsynchFlipDisableFbcQueue:ilk */
> - I915_WRITE(ILK_DISPLAY_CHICKEN1,
> - I915_READ(ILK_DISPLAY_CHICKEN1) |
> - ILK_FBCQ_DIS);
> - I915_WRITE(ILK_DISPLAY_CHICKEN2,
> - I915_READ(ILK_DISPLAY_CHICKEN2) |
> - ILK_DPARB_GATE);
> - }
> -
> - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
> -
> - I915_WRITE(ILK_DISPLAY_CHICKEN2,
> - I915_READ(ILK_DISPLAY_CHICKEN2) |
> - ILK_ELPIN_409_SELECT);
> - I915_WRITE(_3D_CHICKEN2,
> - _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
> - _3D_CHICKEN2_WM_READ_PIPELINED);
> -
> - /* WaDisableRenderCachePipelinedFlush:ilk */
> - I915_WRITE(CACHE_MODE_0,
> - _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
> -
> - /* WaDisable_RenderCache_OperationalFlush:ilk */
> - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
> -
> - g4x_disable_trickle_feed(dev_priv);
> -
> - ibx_init_clock_gating(dev_priv);
> -}
> -
> -static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
> -{
> - int pipe;
> - uint32_t val;
> -
> - /*
> - * On Ibex Peak and Cougar Point, we need to disable clock
> - * gating for the panel power sequencer or it will fail to
> - * start up when no ports are active.
> - */
> - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
> - PCH_DPLUNIT_CLOCK_GATE_DISABLE |
> - PCH_CPUNIT_CLOCK_GATE_DISABLE);
> - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
> - DPLS_EDP_PPS_FIX_DIS);
> - /* The below fixes the weird display corruption, a few pixels shifted
> - * downward, on (only) LVDS of some HP laptops with IVY.
> - */
> - for_each_pipe(dev_priv, pipe) {
> - val = I915_READ(TRANS_CHICKEN2(pipe));
> - val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
> - val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
> - if (dev_priv->vbt.fdi_rx_polarity_inverted)
> - val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
> - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
> - val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
> - val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
> - I915_WRITE(TRANS_CHICKEN2(pipe), val);
> - }
> - /* WADP0ClockGatingDisable */
> - for_each_pipe(dev_priv, pipe) {
> - I915_WRITE(TRANS_CHICKEN1(pipe),
> - TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
> - }
> -}
> -
> -static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
> -{
> - uint32_t tmp;
> -
> - tmp = I915_READ(MCH_SSKPD);
> - if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
> - DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
> - tmp);
> -}
> -
> -static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
> -{
> - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
> -
> - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
> -
> - I915_WRITE(ILK_DISPLAY_CHICKEN2,
> - I915_READ(ILK_DISPLAY_CHICKEN2) |
> - ILK_ELPIN_409_SELECT);
> -
> - /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
> - I915_WRITE(_3D_CHICKEN,
> - _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
> -
> - /* WaDisable_RenderCache_OperationalFlush:snb */
> - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
> -
> - /*
> - * BSpec recommends 8x4 when MSAA is used,
> - * however in practice 16x4 seems fastest.
> - *
> - * Note that PS/WM thread counts depend on the WIZ hashing
> - * disable bit, which we don't touch here, but it's good
> - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
> - */
> - I915_WRITE(GEN6_GT_MODE,
> - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
> -
> - I915_WRITE(CACHE_MODE_0,
> - _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
> -
> - I915_WRITE(GEN6_UCGCTL1,
> - I915_READ(GEN6_UCGCTL1) |
> - GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
> - GEN6_CSUNIT_CLOCK_GATE_DISABLE);
> -
> - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
> - * gating disable must be set. Failure to set it results in
> - * flickering pixels due to Z write ordering failures after
> - * some amount of runtime in the Mesa "fire" demo, and Unigine
> - * Sanctuary and Tropics, and apparently anything else with
> - * alpha test or pixel discard.
> - *
> - * According to the spec, bit 11 (RCCUNIT) must also be set,
> - * but we didn't debug actual testcases to find it out.
> - *
> - * WaDisableRCCUnitClockGating:snb
> - * WaDisableRCPBUnitClockGating:snb
> - */
> - I915_WRITE(GEN6_UCGCTL2,
> - GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
> - GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
> -
> - /* WaStripsFansDisableFastClipPerformanceFix:snb */
> - I915_WRITE(_3D_CHICKEN3,
> - _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
> -
> - /*
> - * Bspec says:
> - * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
> - * 3DSTATE_SF number of SF output attributes is more than 16."
> - */
> - I915_WRITE(_3D_CHICKEN3,
> - _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
> -
> - /*
> - * According to the spec the following bits should be
> - * set in order to enable memory self-refresh and fbc:
> - * The bit21 and bit22 of 0x42000
> - * The bit21 and bit22 of 0x42004
> - * The bit5 and bit7 of 0x42020
> - * The bit14 of 0x70180
> - * The bit14 of 0x71180
> - *
> - * WaFbcAsynchFlipDisableFbcQueue:snb
> - */
> - I915_WRITE(ILK_DISPLAY_CHICKEN1,
> - I915_READ(ILK_DISPLAY_CHICKEN1) |
> - ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
> - I915_WRITE(ILK_DISPLAY_CHICKEN2,
> - I915_READ(ILK_DISPLAY_CHICKEN2) |
> - ILK_DPARB_GATE | ILK_VSDPFD_FULL);
> - I915_WRITE(ILK_DSPCLK_GATE_D,
> - I915_READ(ILK_DSPCLK_GATE_D) |
> - ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
> - ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
> -
> - g4x_disable_trickle_feed(dev_priv);
> -
> - cpt_init_clock_gating(dev_priv);
> -
> - gen6_check_mch_setup(dev_priv);
> -}
> -
> -static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
> -{
> - uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
> -
> - /*
> - * WaVSThreadDispatchOverride:ivb,vlv
> - *
> - * This actually overrides the dispatch
> - * mode for all thread types.
> - */
> - reg &= ~GEN7_FF_SCHED_MASK;
> - reg |= GEN7_FF_TS_SCHED_HW;
> - reg |= GEN7_FF_VS_SCHED_HW;
> - reg |= GEN7_FF_DS_SCHED_HW;
> -
> - I915_WRITE(GEN7_FF_THREAD_MODE, reg);
> -}
> -
> -static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
> -{
> - /*
> - * TODO: this bit should only be enabled when really needed, then
> - * disabled when not needed anymore in order to save power.
> - */
> - if (HAS_PCH_LPT_LP(dev_priv))
> - I915_WRITE(SOUTH_DSPCLK_GATE_D,
> - I915_READ(SOUTH_DSPCLK_GATE_D) |
> - PCH_LP_PARTITION_LEVEL_DISABLE);
> + if (HAS_PCH_LPT_LP(dev_priv))
> + I915_WRITE(SOUTH_DSPCLK_GATE_D,
> + I915_READ(SOUTH_DSPCLK_GATE_D) |
> + PCH_LP_PARTITION_LEVEL_DISABLE);
>
> /* WADPOClockGatingDisable:hsw */
> I915_WRITE(TRANS_CHICKEN1(PIPE_A),
> @@ -9161,74 +6861,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
> }
> }
>
> -static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - /*
> - * N = val - 0xb7
> - * Slow = Fast = GPLL ref * N
> - */
> - return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
> -}
> -
> -static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
> -}
> -
> -static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - /*
> - * N = val / 2
> - * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
> - */
> - return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
> -}
> -
> -static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
> -{
> - struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> - /* CHV needs even values */
> - return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
> -}
> -
> -int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
> -{
> - if (INTEL_GEN(dev_priv) >= 9)
> - return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
> - GEN9_FREQ_SCALER);
> - else if (IS_CHERRYVIEW(dev_priv))
> - return chv_gpu_freq(dev_priv, val);
> - else if (IS_VALLEYVIEW(dev_priv))
> - return byt_gpu_freq(dev_priv, val);
> - else
> - return val * GT_FREQUENCY_MULTIPLIER;
> -}
> -
> -int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
> -{
> - if (INTEL_GEN(dev_priv) >= 9)
> - return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
> - GT_FREQUENCY_MULTIPLIER);
> - else if (IS_CHERRYVIEW(dev_priv))
> - return chv_freq_opcode(dev_priv, val);
> - else if (IS_VALLEYVIEW(dev_priv))
> - return byt_freq_opcode(dev_priv, val);
> - else
> - return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
> -}
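And because the opcode/MHz conversions are easy to get backwards, a quick
standalone round-trip check of the VLV and CHV formulas. The gpll_ref value
is an arbitrary number I picked to exercise the maths, not something read
from hardware or from this patch:

	#include <stdio.h>

	/* userspace stand-in for the kernel's DIV_ROUND_CLOSEST() for positive values */
	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

	static int byt_gpu_freq(int gpll_ref, int val)
	{
		return DIV_ROUND_CLOSEST(gpll_ref * (val - 0xb7), 1000);
	}

	static int byt_freq_opcode(int gpll_ref, int mhz)
	{
		return DIV_ROUND_CLOSEST(1000 * mhz, gpll_ref) + 0xb7;
	}

	static int chv_gpu_freq(int gpll_ref, int val)
	{
		return DIV_ROUND_CLOSEST(gpll_ref * val, 2 * 2 * 1000);
	}

	static int chv_freq_opcode(int gpll_ref, int mhz)
	{
		return DIV_ROUND_CLOSEST(2 * 1000 * mhz, gpll_ref) * 2;	/* CHV wants even opcodes */
	}

	int main(void)
	{
		int gpll_ref = 5400;	/* made-up GPLL ref, just to exercise the formulas */

		/* round trips: opcode -> MHz -> opcode should land back where it started */
		printf("byt: 0xc8 -> %d MHz -> 0x%x\n",
		       byt_gpu_freq(gpll_ref, 0xc8),
		       byt_freq_opcode(gpll_ref, byt_gpu_freq(gpll_ref, 0xc8)));
		printf("chv: 44 -> %d MHz -> %d\n",
		       chv_gpu_freq(gpll_ref, 44),
		       chv_freq_opcode(gpll_ref, chv_gpu_freq(gpll_ref, 44)));
		return 0;
	}

At least for these values the round trip lands back on the same opcode,
which is a handy sanity check on the CHV "even values only" rule warned
about in cherryview_init_gt_powersave().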
> -
> void intel_pm_setup(struct drm_i915_private *dev_priv)
> {
> - mutex_init(&dev_priv->gt_pm.rps.lock);
> - atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
> -
> dev_priv->runtime_pm.suspended = false;
> atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
> }
> diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
> index 4df7c2ef8576..5aaf667c52ab 100644
> --- a/drivers/gpu/drm/i915/intel_uncore.c
> +++ b/drivers/gpu/drm/i915/intel_uncore.c
> @@ -571,8 +571,6 @@ void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
>
> void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
> {
> - /* BIOS often leaves RC6 enabled, but disable it for hw init */
> - intel_sanitize_gt_powersave(dev_priv);
> }
>
> static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
--
Thanks,
Sagar