[Intel-gfx] [PATCH] drm/i915: Use drm_i915_private as the native pointer for intel_uncore.c
Joonas Lahtinen
joonas.lahtinen at linux.intel.com
Mon May 9 15:59:58 UTC 2016
On Mon, 2016-05-09 at 14:58 +0100, Chris Wilson wrote:
> Pass drm_i915_private to the uncore init/fini routines and their
> subservients as it is their native type.
>
>    text    data    bss      dec    hex filename
> 6309978 3578778 696320 10585076 a183f4 vmlinux
> 6309530 3578778 696320 10584628 a18234 vmlinux
>
> a modest 400 bytes of saving, but 60 lines of code deleted!
Please do copy & paste the sed command or cocci file into the commit
message, otherwise this looks fine to me. I would do this for plenty
of other functions too, and for the s/dev_priv/i915/ rename. Do you
have a patch ready for the preceding s/i915/i915_module/ rename, or
should I go and write it?
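
For reference, something along these lines is the kind of thing I mean
(only an illustration, not necessarily the exact command you ran, and
it would still need the manual fixups on top):

  # illustrative sed run over the callers touched by this patch
  sed -i \
      -e 's/intel_uncore_init(dev)/intel_uncore_init(dev_priv)/g' \
      -e 's/intel_uncore_fini(dev)/intel_uncore_fini(dev_priv)/g' \
      -e 's/intel_uncore_sanitize(dev)/intel_uncore_sanitize(dev_priv)/g' \
      drivers/gpu/drm/i915/*.c

or the equivalent cocci rule, so that the conversion can be re-run and
verified mechanically.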
Reviewed-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Regards, Joonas
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/i915_debugfs.c        |   4 +-
> drivers/gpu/drm/i915/i915_dma.c            |  13 +-
> drivers/gpu/drm/i915/i915_drv.c            |  26 +--
> drivers/gpu/drm/i915/i915_drv.h            |  18 +-
> drivers/gpu/drm/i915/i915_gem_gtt.c        |   9 +-
> drivers/gpu/drm/i915/i915_gem_gtt.h        |   2 +-
> drivers/gpu/drm/i915/i915_guc_submission.c |   3 +-
> drivers/gpu/drm/i915/i915_irq.c            |   5 +-
> drivers/gpu/drm/i915/i915_sysfs.c          |   4 +-
> drivers/gpu/drm/i915/i915_vgpu.c           |   7 +-
> drivers/gpu/drm/i915/i915_vgpu.h           |   2 +-
> drivers/gpu/drm/i915/intel_display.c       |   9 +-
> drivers/gpu/drm/i915/intel_drv.h           |  22 +-
> drivers/gpu/drm/i915/intel_pm.c            | 342 ++++++++++++-----------------
> drivers/gpu/drm/i915/intel_uncore.c        | 162 +++++++-------
> 15 files changed, 283 insertions(+), 345 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 6698957ede3f..428ca998cd2e 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -4990,7 +4990,7 @@ i915_max_freq_set(void *data, u64 val)
>
> dev_priv->rps.max_freq_softlimit = val;
>
> - intel_set_rps(dev, val);
> + intel_set_rps(dev_priv, val);
>
> mutex_unlock(&dev_priv->rps.hw_lock);
>
> @@ -5057,7 +5057,7 @@ i915_min_freq_set(void *data, u64 val)
>
> dev_priv->rps.min_freq_softlimit = val;
>
> - intel_set_rps(dev, val);
> + intel_set_rps(dev_priv, val);
>
> mutex_unlock(&dev_priv->rps.hw_lock);
>
> diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
> index 46ac1da64a09..9208f5e3c737 100644
> --- a/drivers/gpu/drm/i915/i915_dma.c
> +++ b/drivers/gpu/drm/i915/i915_dma.c
> @@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
> return -ENODEV;
> break;
> case I915_PARAM_HAS_GPU_RESET:
> - value = i915.enable_hangcheck &&
> - intel_has_gpu_reset(dev);
> + value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
> break;
> case I915_PARAM_HAS_RESOURCE_STREAMER:
> value = HAS_RESOURCE_STREAMER(dev);
> @@ -427,6 +426,8 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
>
> static void i915_gem_fini(struct drm_device *dev)
> {
> + struct drm_i915_private *dev_priv = to_i915(dev);
> +
> /*
> * Neither the BIOS, ourselves or any other kernel
> * expects the system to be in execlists mode on startup,
> @@ -447,7 +448,7 @@ static void i915_gem_fini(struct drm_device *dev)
> * machine in an unusable condition.
> */
> if (HAS_HW_CONTEXTS(dev)) {
> - int reset = intel_gpu_reset(dev, ALL_ENGINES);
> + int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
> WARN_ON(reset && reset != -ENODEV);
> }
>
> @@ -1189,7 +1190,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
> if (ret < 0)
> goto put_bridge;
>
> - intel_uncore_init(dev);
> + intel_uncore_init(dev_priv);
>
> return 0;
>
> @@ -1207,7 +1208,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
> {
> struct drm_device *dev = dev_priv->dev;
>
> - intel_uncore_fini(dev);
> + intel_uncore_fini(dev_priv);
> i915_mmio_cleanup(dev);
> pci_dev_put(dev_priv->bridge_dev);
> }
> @@ -1282,7 +1283,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
> pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
> PM_QOS_DEFAULT_VALUE);
>
> - intel_uncore_sanitize(dev);
> + intel_uncore_sanitize(dev_priv);
>
> intel_opregion_setup(dev);
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index e8f7a097073c..2d97c97dece0 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -606,7 +606,7 @@ static int i915_drm_suspend(struct drm_device *dev)
>
> intel_guc_suspend(dev);
>
> - intel_suspend_gt_powersave(dev);
> + intel_suspend_gt_powersave(dev_priv);
>
> intel_display_suspend(dev);
>
> @@ -626,7 +626,7 @@ static int i915_drm_suspend(struct drm_device *dev)
> opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
> intel_opregion_notify_adapter(dev, opregion_target_state);
>
> - intel_uncore_forcewake_reset(dev, false);
> + intel_uncore_forcewake_reset(dev_priv, false);
> intel_opregion_fini(dev);
>
> intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
> @@ -861,9 +861,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
> DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
> ret);
>
> - intel_uncore_early_sanitize(dev, true);
> + intel_uncore_early_sanitize(dev_priv, true);
>
> - if (IS_BROXTON(dev)) {
> + if (IS_BROXTON(dev_priv)) {
> if (!dev_priv->suspended_to_idle)
> gen9_sanitize_dc_state(dev_priv);
> bxt_disable_dc9(dev_priv);
> @@ -871,7 +871,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
> hsw_disable_pc8(dev_priv);
> }
>
> - intel_uncore_sanitize(dev);
> + intel_uncore_sanitize(dev_priv);
>
> if (IS_BROXTON(dev_priv) ||
> !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
> @@ -921,7 +921,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
> unsigned reset_counter;
> int ret;
>
> - intel_reset_gt_powersave(dev);
> + intel_reset_gt_powersave(dev_priv);
>
> mutex_lock(&dev->struct_mutex);
>
> @@ -937,7 +937,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
>
> i915_gem_reset(dev);
>
> - ret = intel_gpu_reset(dev, ALL_ENGINES);
> + ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
>
> /* Also reset the gpu hangman. */
> if (error->stop_rings != 0) {
> @@ -992,7 +992,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
> * of re-init after reset.
> */
> if (INTEL_INFO(dev)->gen > 5)
> - intel_enable_gt_powersave(dev);
> + intel_enable_gt_powersave(dev_priv);
>
> return 0;
>
> @@ -1469,7 +1469,7 @@ static int intel_runtime_suspend(struct device *device)
> struct drm_i915_private *dev_priv = dev->dev_private;
> int ret;
>
> - if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
> + if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
> return -ENODEV;
>
> if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
> @@ -1508,7 +1508,7 @@ static int intel_runtime_suspend(struct device *device)
>
> intel_guc_suspend(dev);
>
> - intel_suspend_gt_powersave(dev);
> + intel_suspend_gt_powersave(dev_priv);
> intel_runtime_pm_disable_interrupts(dev_priv);
>
> ret = 0;
> @@ -1530,7 +1530,7 @@ static int intel_runtime_suspend(struct device *device)
> return ret;
> }
>
> - intel_uncore_forcewake_reset(dev, false);
> + intel_uncore_forcewake_reset(dev_priv, false);
>
> enable_rpm_wakeref_asserts(dev_priv);
> WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
> @@ -1611,7 +1611,7 @@ static int intel_runtime_resume(struct device *device)
> * we can do is to hope that things will still work (and disable RPM).
> */
> i915_gem_init_swizzling(dev);
> - gen6_update_ring_freq(dev);
> + gen6_update_ring_freq(dev_priv);
>
> intel_runtime_pm_enable_interrupts(dev_priv);
>
> @@ -1623,7 +1623,7 @@ static int intel_runtime_resume(struct device *device)
> if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
> intel_hpd_init(dev_priv);
>
> - intel_enable_gt_powersave(dev);
> + intel_enable_gt_powersave(dev_priv);
>
> enable_rpm_wakeref_asserts(dev_priv);
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 26e7de415966..4abd39f32c0f 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2777,8 +2777,8 @@ extern void i915_driver_postclose(struct drm_device *dev,
> extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
> unsigned long arg);
> #endif
> -extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
> -extern bool intel_has_gpu_reset(struct drm_device *dev);
> +extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
> +extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
> extern int i915_reset(struct drm_i915_private *dev_priv);
> extern int intel_guc_reset(struct drm_i915_private *dev_priv);
> extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
> @@ -2807,14 +2807,15 @@ extern void intel_irq_init(struct drm_i915_private *dev_priv);
> int intel_irq_install(struct drm_i915_private *dev_priv);
> void intel_irq_uninstall(struct drm_i915_private *dev_priv);
>
> -extern void intel_uncore_sanitize(struct drm_device *dev);
> -extern void intel_uncore_early_sanitize(struct drm_device *dev,
> +extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
> +extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
> bool restore_forcewake);
> -extern void intel_uncore_init(struct drm_device *dev);
> +extern void intel_uncore_init(struct drm_i915_private *dev_priv);
> extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
> extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
> -extern void intel_uncore_fini(struct drm_device *dev);
> -extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
> +extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
> +extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
> + bool restore);
> const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
> void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
> enum forcewake_domains domains);
> @@ -3547,11 +3548,10 @@ extern void i915_redisable_vga(struct drm_device *dev);
> extern void i915_redisable_vga_power_on(struct drm_device *dev);
> extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
> extern void intel_init_pch_refclk(struct drm_device *dev);
> -extern void intel_set_rps(struct drm_device *dev, u8 val);
> +extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
> extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
> bool enable);
> extern void intel_detect_pch(struct drm_device *dev);
> -extern int intel_enable_rc6(const struct drm_device *dev);
>
> extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
> int i915_reg_read_ioctl(struct drm_device *dev, void *data,
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 667f0e859671..41bbb6a2435e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2279,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
> dev_priv->mm.interruptible = interruptible;
> }
>
> -void i915_check_and_clear_faults(struct drm_device *dev)
> +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
>
> - if (INTEL_INFO(dev)->gen < 6)
> + if (INTEL_INFO(dev_priv)->gen < 6)
> return;
>
> for_each_engine(engine, dev_priv) {
> @@ -2328,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
> if (INTEL_INFO(dev)->gen < 6)
> return;
>
> - i915_check_and_clear_faults(dev);
> + i915_check_and_clear_faults(dev_priv);
>
> ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
> true);
> @@ -3240,7 +3239,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
> struct i915_vma *vma;
> bool flush;
>
> - i915_check_and_clear_faults(dev);
> + i915_check_and_clear_faults(dev_priv);
>
> /* First fill our portion of the GTT with scratch pages */
> ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index 18af3af18754..144870ea5488 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -535,7 +535,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
> kref_put(&ppgtt->ref, i915_ppgtt_release);
> }
>
> -void i915_check_and_clear_faults(struct drm_device *dev);
> +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
> void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
> void i915_gem_restore_gtt_mappings(struct drm_device *dev);
>
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index a304b0ead099..169242a8adff 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
>
> data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
> /* WaRsDisableCoarsePowerGating:skl,bxt */
> - if (!intel_enable_rc6(dev) ||
> - NEEDS_WaRsDisableCoarsePowerGating(dev))
> + if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
> data[1] = 0;
> else
> /* bit 0 and 1 are for Render and Media domain separately */
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index a163037ddbd8..2fa26159d6e5 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
> __gen6_disable_pm_irq(dev_priv, mask);
> }
>
> -void gen6_reset_rps_interrupts(struct drm_device *dev)
> +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> i915_reg_t reg = gen6_pm_iir(dev_priv);
>
> spin_lock_irq(&dev_priv->irq_lock);
> @@ -1168,7 +1167,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
> new_delay += adj;
> new_delay = clamp_t(int, new_delay, min, max);
>
> - intel_set_rps(dev_priv->dev, new_delay);
> + intel_set_rps(dev_priv, new_delay);
>
> mutex_unlock(&dev_priv->rps.hw_lock);
> out:
> diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
> index 2d576b7ff299..d01add648e82 100644
> --- a/drivers/gpu/drm/i915/i915_sysfs.c
> +++ b/drivers/gpu/drm/i915/i915_sysfs.c
> @@ -397,7 +397,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
> /* We still need *_set_rps to process the new max_delay and
> * update the interrupt limits and PMINTRMSK even though
> * frequency request may be unchanged. */
> - intel_set_rps(dev, val);
> + intel_set_rps(dev_priv, val);
>
> mutex_unlock(&dev_priv->rps.hw_lock);
>
> @@ -461,7 +461,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
> /* We still need *_set_rps to process the new min_delay and
> * update the interrupt limits and PMINTRMSK even though
> * frequency request may be unchanged. */
> - intel_set_rps(dev, val);
> + intel_set_rps(dev_priv, val);
>
> mutex_unlock(&dev_priv->rps.hw_lock);
>
> diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
> index d02efb8cad4d..d5a7a5e7ee7e 100644
> --- a/drivers/gpu/drm/i915/i915_vgpu.c
> +++ b/drivers/gpu/drm/i915/i915_vgpu.c
> @@ -58,15 +58,14 @@
> * This function is called at the initialization stage, to detect whether
> * running on a vGPU.
> */
> -void i915_check_vgpu(struct drm_device *dev)
> +void i915_check_vgpu(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = to_i915(dev);
> uint64_t magic;
> uint32_t version;
>
> BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
>
> - if (!IS_HASWELL(dev))
> + if (!IS_HASWELL(dev_priv))
> return;
>
> magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
> @@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
>
> /**
> * intel_vgt_balloon - balloon out reserved graphics address trunks
> - * @dev: drm device
> + * @dev_priv: i915 device
> *
> * This function is called at the initialization stage, to balloon out the
> * graphic address space allocated to other vGPUs, by marking these spaces as
> diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
> index 3c83b47b5f69..21ffcfea5f5d 100644
> --- a/drivers/gpu/drm/i915/i915_vgpu.h
> +++ b/drivers/gpu/drm/i915/i915_vgpu.h
> @@ -110,7 +110,7 @@ struct vgt_if {
> #define VGT_DRV_DISPLAY_NOT_READY 0
> #define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
>
> -extern void i915_check_vgpu(struct drm_device *dev);
> +extern void i915_check_vgpu(struct drm_i915_private *dev_priv);
> extern int intel_vgt_balloon(struct drm_device *dev);
> extern void intel_vgt_deballoon(void);
>
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 3c6b7b9aff08..e4b49aa4ab8d 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -15283,7 +15283,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
> dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
>
> intel_init_clock_gating(dev);
> - intel_enable_gt_powersave(dev);
> + intel_enable_gt_powersave(dev_priv);
> }
>
> /*
> @@ -16011,11 +16011,12 @@ retry:
>
> void intel_modeset_gem_init(struct drm_device *dev)
> {
> + struct drm_i915_private *dev_priv = to_i915(dev);
> struct drm_crtc *c;
> struct drm_i915_gem_object *obj;
> int ret;
>
> - intel_init_gt_powersave(dev);
> + intel_init_gt_powersave(dev_priv);
>
> intel_modeset_init_hw(dev);
>
> @@ -16062,7 +16063,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
> struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_connector *connector;
>
> - intel_disable_gt_powersave(dev);
> + intel_disable_gt_powersave(dev_priv);
>
> intel_backlight_unregister(dev);
>
> @@ -16094,7 +16095,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
>
> intel_cleanup_overlay(dev);
>
> - intel_cleanup_gt_powersave(dev);
> + intel_cleanup_gt_powersave(dev_priv);
>
> intel_teardown_gmbus(dev);
> }
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index 94a44af37ef7..1a3e69e5f9f5 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -1032,7 +1032,7 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
> void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
> void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
> void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
> -void gen6_reset_rps_interrupts(struct drm_device *dev);
> +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
> void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
> void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
> u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
> @@ -1612,13 +1612,13 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
> void intel_pm_setup(struct drm_device *dev);
> void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
> void intel_gpu_ips_teardown(void);
> -void intel_init_gt_powersave(struct drm_device *dev);
> -void intel_cleanup_gt_powersave(struct drm_device *dev);
> -void intel_enable_gt_powersave(struct drm_device *dev);
> -void intel_disable_gt_powersave(struct drm_device *dev);
> -void intel_suspend_gt_powersave(struct drm_device *dev);
> -void intel_reset_gt_powersave(struct drm_device *dev);
> -void gen6_update_ring_freq(struct drm_device *dev);
> +void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
> +void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
> void gen6_rps_busy(struct drm_i915_private *dev_priv);
> void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
> void gen6_rps_idle(struct drm_i915_private *dev_priv);
> @@ -1633,7 +1633,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
> struct skl_ddb_allocation *ddb /* out */);
> uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
> bool ilk_disable_lp_wm(struct drm_device *dev);
> -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
> +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
> +static inline int intel_enable_rc6(void)
> +{
> + return i915.enable_rc6;
> +}
>
> /* intel_sdvo.c */
> bool intel_sdvo_init(struct drm_device *dev,
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 299b6cd61f69..c6de7a197a84 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -4419,12 +4419,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
> /* gen6_set_rps is called to update the frequency request, but should also be
> * called when the range (min_delay and max_delay) is modified so that we can
> * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
> -static void gen6_set_rps(struct drm_device *dev, u8 val)
> +static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
> - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
> + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
> return;
>
> WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
> @@ -4437,10 +4435,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
> if (val != dev_priv->rps.cur_freq) {
> gen6_set_rps_thresholds(dev_priv, val);
>
> - if (IS_GEN9(dev))
> + if (IS_GEN9(dev_priv))
> I915_WRITE(GEN6_RPNSWREQ,
> GEN9_FREQUENCY(val));
> - else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
> + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
> I915_WRITE(GEN6_RPNSWREQ,
> HSW_FREQUENCY(val));
> else
> @@ -4462,15 +4460,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
> trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
> }
>
> -static void valleyview_set_rps(struct drm_device *dev, u8 val)
> +static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
> WARN_ON(val > dev_priv->rps.max_freq);
> WARN_ON(val < dev_priv->rps.min_freq);
>
> - if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
> + if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
> "Odd GPU freq value\n"))
> val &= ~1;
>
> @@ -4503,7 +4499,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
> /* Wake up the media well, as that takes a lot less
> * power than the Render well. */
> intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
> - valleyview_set_rps(dev_priv->dev, val);
> + valleyview_set_rps(dev_priv, val);
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
> }
>
> @@ -4521,14 +4517,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
>
> void gen6_rps_idle(struct drm_i915_private *dev_priv)
> {
> - struct drm_device *dev = dev_priv->dev;
> -
> mutex_lock(&dev_priv->rps.hw_lock);
> if (dev_priv->rps.enabled) {
> - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
> + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> vlv_set_rps_idle(dev_priv);
> else
> - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
> + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
> dev_priv->rps.last_adj = 0;
> I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
> }
> @@ -4576,49 +4570,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
> spin_unlock(&dev_priv->rps.client_lock);
> }
>
> -void intel_set_rps(struct drm_device *dev, u8 val)
> +void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
> {
> - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
> - valleyview_set_rps(dev, val);
> + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> + valleyview_set_rps(dev_priv, val);
> else
> - gen6_set_rps(dev, val);
> + gen6_set_rps(dev_priv, val);
> }
>
> -static void gen9_disable_rc6(struct drm_device *dev)
> +static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> I915_WRITE(GEN6_RC_CONTROL, 0);
> I915_WRITE(GEN9_PG_ENABLE, 0);
> }
>
> -static void gen9_disable_rps(struct drm_device *dev)
> +static void gen9_disable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> I915_WRITE(GEN6_RP_CONTROL, 0);
> }
>
> -static void gen6_disable_rps(struct drm_device *dev)
> +static void gen6_disable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> I915_WRITE(GEN6_RC_CONTROL, 0);
> I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
> I915_WRITE(GEN6_RP_CONTROL, 0);
> }
>
> -static void cherryview_disable_rps(struct drm_device *dev)
> +static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> I915_WRITE(GEN6_RC_CONTROL, 0);
> }
>
> -static void valleyview_disable_rps(struct drm_device *dev)
> +static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> /* we're doing forcewake before Disabling RC6,
> * This what the BIOS expects when going into suspend */
> intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> @@ -4628,15 +4612,15 @@ static void valleyview_disable_rps(struct drm_device *dev)
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> }
>
> -static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
> +static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
> {
> - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
> + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
> if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
> mode = GEN6_RC_CTL_RC6_ENABLE;
> else
> mode = 0;
> }
> - if (HAS_RC6p(dev))
> + if (HAS_RC6p(dev_priv))
> DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
> onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
> onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
> @@ -4647,9 +4631,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
> onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
> }
>
> -static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
> +static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = to_i915(dev);
> struct i915_ggtt *ggtt = &dev_priv->ggtt;
> bool enable_rc6 = true;
> unsigned long rc6_ctx_base;
> @@ -4690,16 +4673,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
> return enable_rc6;
> }
>
> -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
> +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
> {
> /* No RC6 before Ironlake and code is gone for ilk. */
> - if (INTEL_INFO(dev)->gen < 6)
> + if (INTEL_INFO(dev_priv)->gen < 6)
> return 0;
>
> if (!enable_rc6)
> return 0;
>
> - if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
> + if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
> DRM_INFO("RC6 disabled by BIOS\n");
> return 0;
> }
> @@ -4708,7 +4691,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
> if (enable_rc6 >= 0) {
> int mask;
>
> - if (HAS_RC6p(dev))
> + if (HAS_RC6p(dev_priv))
> mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
> INTEL_RC6pp_ENABLE;
> else
> @@ -4721,20 +4704,14 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
> return enable_rc6 & mask;
> }
>
> - if (IS_IVYBRIDGE(dev))
> + if (IS_IVYBRIDGE(dev_priv))
> return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
>
> return INTEL_RC6_ENABLE;
> }
>
> -int intel_enable_rc6(const struct drm_device *dev)
> -{
> - return i915.enable_rc6;
> -}
> -
> -static void gen6_init_rps_frequencies(struct drm_device *dev)
> +static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> uint32_t rp_state_cap;
> u32 ddcc_status = 0;
> int ret;
> @@ -4742,7 +4719,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
> /* All of these values are in units of 50MHz */
> dev_priv->rps.cur_freq = 0;
> /* static values from HW: RP0 > RP1 > RPn (min_freq) */
> - if (IS_BROXTON(dev)) {
> + if (IS_BROXTON(dev_priv)) {
> rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
> dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
> dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
> @@ -4758,8 +4735,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
> dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
>
> dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
> - if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
> - IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
> + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
> + IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
> ret = sandybridge_pcode_read(dev_priv,
> HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
> &ddcc_status);
> @@ -4771,7 +4748,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
> dev_priv->rps.max_freq);
> }
>
> - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
> + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
> /* Store the frequency values in 16.66 MHZ units, which is
> the natural hardware unit for SKL */
> dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
> @@ -4788,7 +4765,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
> dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
>
> if (dev_priv->rps.min_freq_softlimit == 0) {
> - if (IS_HASWELL(dev) || IS_BROADWELL(dev))
> + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
> dev_priv->rps.min_freq_softlimit =
> max_t(int, dev_priv->rps.efficient_freq,
> intel_freq_opcode(dev_priv, 450));
> @@ -4799,16 +4776,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
> }
>
> /* See the Gen9_GT_PM_Programming_Guide doc for the below */
> -static void gen9_enable_rps(struct drm_device *dev)
> +static void gen9_enable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
>
> - gen6_init_rps_frequencies(dev);
> + gen6_init_rps_frequencies(dev_priv);
>
> /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
> - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
> + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
> /*
> * BIOS could leave the Hw Turbo enabled, so need to explicitly
> * clear out the Control register just to avoid inconsitency
> @@ -4818,7 +4793,7 @@ static void gen9_enable_rps(struct drm_device *dev)
> * if the Turbo is left enabled in the Control register, as the
> * Up/Down interrupts would remain masked.
> */
> - gen9_disable_rps(dev);
> + gen9_disable_rps(dev_priv);
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> return;
> }
> @@ -4837,14 +4812,13 @@ static void gen9_enable_rps(struct drm_device *dev)
> * Up/Down EI & threshold registers, as well as the RP_CONTROL,
> * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
> dev_priv->rps.power = HIGH_POWER; /* force a reset */
> - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
> + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
>
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> }
>
> -static void gen9_enable_rc6(struct drm_device *dev)
> +static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
> uint32_t rc6_mask = 0;
>
> @@ -4861,7 +4835,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
> /* 2b: Program RC6 thresholds.*/
>
> /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
> - if (IS_SKYLAKE(dev))
> + if (IS_SKYLAKE(dev_priv))
> I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
> else
> I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
> @@ -4870,7 +4844,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
> for_each_engine(engine, dev_priv)
> I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
>
> - if (HAS_GUC_UCODE(dev))
> + if (HAS_GUC_UCODE(dev_priv))
> I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
>
> I915_WRITE(GEN6_RC_SLEEP, 0);
> @@ -4880,12 +4854,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
> I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
>
> /* 3a: Enable RC6 */
> - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
> + if (intel_enable_rc6() & INTEL_RC6_ENABLE)
> rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
> DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
> /* WaRsUseTimeoutMode */
> - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
> - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
> + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
> + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
> I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
> I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
> GEN7_RC_CTL_TO_MODE |
> @@ -4901,19 +4875,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
> * 3b: Enable Coarse Power Gating only when RC6 is enabled.
> * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
> */
> - if (NEEDS_WaRsDisableCoarsePowerGating(dev))
> + if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
> I915_WRITE(GEN9_PG_ENABLE, 0);
> else
> I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
> (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
>
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> -
> }
>
> -static void gen8_enable_rps(struct drm_device *dev)
> +static void gen8_enable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
> uint32_t rc6_mask = 0;
>
> @@ -4928,7 +4900,7 @@ static void gen8_enable_rps(struct drm_device *dev)
> I915_WRITE(GEN6_RC_CONTROL, 0);
>
> /* Initialize rps frequencies */
> - gen6_init_rps_frequencies(dev);
> + gen6_init_rps_frequencies(dev_priv);
>
> /* 2b: Program RC6 thresholds.*/
> I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
> @@ -4937,16 +4909,16 @@ static void gen8_enable_rps(struct drm_device *dev)
> for_each_engine(engine, dev_priv)
> I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
> I915_WRITE(GEN6_RC_SLEEP, 0);
> - if (IS_BROADWELL(dev))
> + if (IS_BROADWELL(dev_priv))
> I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
> else
> I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
>
> /* 3: Enable RC6 */
> - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
> + if (intel_enable_rc6() & INTEL_RC6_ENABLE)
> rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
> - intel_print_rc6_info(dev, rc6_mask);
> - if (IS_BROADWELL(dev))
> + intel_print_rc6_info(dev_priv, rc6_mask);
> + if (IS_BROADWELL(dev_priv))
> I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
> GEN7_RC_CTL_TO_MODE |
> rc6_mask);
> @@ -4987,14 +4959,13 @@ static void gen8_enable_rps(struct drm_device *dev)
> /* 6: Ring frequency + overclocking (our driver does this later */
>
> dev_priv->rps.power = HIGH_POWER; /* force a reset */
> - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
> + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
>
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> }
>
> -static void gen6_enable_rps(struct drm_device *dev)
> +static void gen6_enable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
> u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
> u32 gtfifodbg;
> @@ -5021,7 +4992,7 @@ static void gen6_enable_rps(struct drm_device *dev)
> intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
>
> /* Initialize rps frequencies */
> - gen6_init_rps_frequencies(dev);
> + gen6_init_rps_frequencies(dev_priv);
>
> /* disable the counters and set deterministic thresholds */
> I915_WRITE(GEN6_RC_CONTROL, 0);
> @@ -5037,7 +5008,7 @@ static void gen6_enable_rps(struct drm_device *dev)
>
> I915_WRITE(GEN6_RC_SLEEP, 0);
> I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
> - if (IS_IVYBRIDGE(dev))
> + if (IS_IVYBRIDGE(dev_priv))
> I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
> else
> I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
> @@ -5045,12 +5016,12 @@ static void gen6_enable_rps(struct drm_device *dev)
> I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
>
> /* Check if we are enabling RC6 */
> - rc6_mode = intel_enable_rc6(dev_priv->dev);
> + rc6_mode = intel_enable_rc6();
> if (rc6_mode & INTEL_RC6_ENABLE)
> rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
>
> /* We don't use those on Haswell */
> - if (!IS_HASWELL(dev)) {
> + if (!IS_HASWELL(dev_priv)) {
> if (rc6_mode & INTEL_RC6p_ENABLE)
> rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
>
> @@ -5058,7 +5029,7 @@ static void gen6_enable_rps(struct drm_device *dev)
> rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
> }
>
> - intel_print_rc6_info(dev, rc6_mask);
> + intel_print_rc6_info(dev_priv, rc6_mask);
>
> I915_WRITE(GEN6_RC_CONTROL,
> rc6_mask |
> @@ -5082,13 +5053,13 @@ static void gen6_enable_rps(struct drm_device *dev)
> }
>
> dev_priv->rps.power = HIGH_POWER; /* force a reset */
> - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
> + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
>
> rc6vids = 0;
> ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
> - if (IS_GEN6(dev) && ret) {
> + if (IS_GEN6(dev_priv) && ret) {
> DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
> - } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
> + } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
> DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
> GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
> rc6vids &= 0xffff00;
> @@ -5101,9 +5072,8 @@ static void gen6_enable_rps(struct drm_device *dev)
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> }
>
> -static void __gen6_update_ring_freq(struct drm_device *dev)
> +static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> int min_freq = 15;
> unsigned int gpu_freq;
> unsigned int max_ia_freq, min_ring_freq;
> @@ -5132,7 +5102,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
> /* convert DDR frequency from units of 266.6MHz to bandwidth */
> min_ring_freq = mult_frac(min_ring_freq, 8, 3);
>
> - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
> + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
> /* Convert GT frequency to 50 HZ units */
> min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
> max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
> @@ -5150,16 +5120,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
> int diff = max_gpu_freq - gpu_freq;
> unsigned int ia_freq = 0, ring_freq = 0;
>
> - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
> + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
> /*
> * ring_freq = 2 * GT. ring_freq is in 100MHz units
> * No floor required for ring frequency on SKL.
> */
> ring_freq = gpu_freq;
> - } else if (INTEL_INFO(dev)->gen >= 8) {
> + } else if (INTEL_INFO(dev_priv)->gen >= 8) {
> /* max(2 * GT, DDR). NB: GT is 50MHz units */
> ring_freq = max(min_ring_freq, gpu_freq);
> - } else if (IS_HASWELL(dev)) {
> + } else if (IS_HASWELL(dev_priv)) {
> ring_freq = mult_frac(gpu_freq, 5, 4);
> ring_freq = max(min_ring_freq, ring_freq);
> /* leave ia_freq as the default, chosen by cpufreq */
> @@ -5186,26 +5156,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
> }
> }
>
> -void gen6_update_ring_freq(struct drm_device *dev)
> +void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> - if (!HAS_CORE_RING_FREQ(dev))
> + if (!HAS_CORE_RING_FREQ(dev_priv))
> return;
>
> mutex_lock(&dev_priv->rps.hw_lock);
> - __gen6_update_ring_freq(dev);
> + __gen6_update_ring_freq(dev_priv);
> mutex_unlock(&dev_priv->rps.hw_lock);
> }
>
> static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
> {
> - struct drm_device *dev = dev_priv->dev;
> u32 val, rp0;
>
> val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
>
> - switch (INTEL_INFO(dev)->eu_total) {
> + switch (INTEL_INFO(dev_priv)->eu_total) {
> case 8:
> /* (2 * 4) config */
> rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
> @@ -5316,9 +5283,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
> WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
> }
>
> -static void cherryview_setup_pctx(struct drm_device *dev)
> +static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = to_i915(dev);
> struct i915_ggtt *ggtt = &dev_priv->ggtt;
> unsigned long pctx_paddr, paddr;
> u32 pcbr;
> @@ -5337,15 +5303,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
> DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
> }
>
> -static void valleyview_setup_pctx(struct drm_device *dev)
> +static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct drm_i915_gem_object *pctx;
> unsigned long pctx_paddr;
> u32 pcbr;
> int pctx_size = 24*1024;
>
> - mutex_lock(&dev->struct_mutex);
> + mutex_lock(&dev_priv->dev->struct_mutex);
>
> pcbr = I915_READ(VLV_PCBR);
> if (pcbr) {
> @@ -5370,7 +5335,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
> * overlap with other ranges, such as the frame buffer, protected
> * memory, or any other relevant ranges.
> */
> - pctx = i915_gem_object_create_stolen(dev, pctx_size);
> + pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
> if (!pctx) {
> DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
> goto out;
> @@ -5382,13 +5347,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
> out:
> DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
> dev_priv->vlv_pctx = pctx;
> - mutex_unlock(&dev->struct_mutex);
> + mutex_unlock(&dev_priv->dev->struct_mutex);
> }
>
> -static void valleyview_cleanup_pctx(struct drm_device *dev)
> +static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> if (WARN_ON(!dev_priv->vlv_pctx))
> return;
>
> @@ -5407,12 +5370,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
> dev_priv->rps.gpll_ref_freq);
> }
>
> -static void valleyview_init_gt_powersave(struct drm_device *dev)
> +static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> u32 val;
>
> - valleyview_setup_pctx(dev);
> + valleyview_setup_pctx(dev_priv);
>
> vlv_init_gpll_ref_freq(dev_priv);
>
> @@ -5466,12 +5428,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
> mutex_unlock(&dev_priv->rps.hw_lock);
> }
>
> -static void cherryview_init_gt_powersave(struct drm_device *dev)
> +static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> u32 val;
>
> - cherryview_setup_pctx(dev);
> + cherryview_setup_pctx(dev_priv);
>
> vlv_init_gpll_ref_freq(dev_priv);
>
> @@ -5531,14 +5492,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
> mutex_unlock(&dev_priv->rps.hw_lock);
> }
>
> -static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
> +static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - valleyview_cleanup_pctx(dev);
> + valleyview_cleanup_pctx(dev_priv);
> }
>
> -static void cherryview_enable_rps(struct drm_device *dev)
> +static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
> u32 gtfifodbg, val, rc6_mode = 0, pcbr;
>
> @@ -5583,8 +5543,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
> pcbr = I915_READ(VLV_PCBR);
>
> /* 3: Enable RC6 */
> - if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
> - (pcbr >> VLV_PCBR_ADDR_SHIFT))
> + if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
> + (pcbr >> VLV_PCBR_ADDR_SHIFT))
> rc6_mode = GEN7_RC_CTL_TO_MODE;
>
> I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
> @@ -5629,14 +5589,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
> intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
> dev_priv->rps.idle_freq);
>
> - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
> + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
>
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> }
>
> -static void valleyview_enable_rps(struct drm_device *dev)
> +static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
> u32 gtfifodbg, val, rc6_mode = 0;
>
> @@ -5689,10 +5648,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
> VLV_MEDIA_RC6_COUNT_EN |
> VLV_RENDER_RC6_COUNT_EN));
>
> - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
> + if (intel_enable_rc6() & INTEL_RC6_ENABLE)
> rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
>
> - intel_print_rc6_info(dev, rc6_mode);
> + intel_print_rc6_info(dev_priv, rc6_mode);
>
> I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
>
> @@ -5719,7 +5678,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
> intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
> dev_priv->rps.idle_freq);
>
> - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
> + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
>
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
> }
> @@ -5809,10 +5768,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
>
> unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
> {
> - struct drm_device *dev = dev_priv->dev;
> unsigned long val;
>
> - if (INTEL_INFO(dev)->gen != 5)
> + if (INTEL_INFO(dev_priv)->gen != 5)
> return 0;
>
> spin_lock_irq(&mchdev_lock);
> @@ -5852,11 +5810,10 @@ static int _pxvid_to_vd(u8 pxvid)
>
> static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
> {
> - struct drm_device *dev = dev_priv->dev;
> const int vd = _pxvid_to_vd(pxvid);
> const int vm = vd - 1125;
>
> - if (INTEL_INFO(dev)->is_mobile)
> + if (INTEL_INFO(dev_priv)->is_mobile)
> return vm > 0 ? vm : 0;
>
> return vd;
> @@ -5897,9 +5854,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
>
> void i915_update_gfx_val(struct drm_i915_private *dev_priv)
> {
> - struct drm_device *dev = dev_priv->dev;
> -
> - if (INTEL_INFO(dev)->gen != 5)
> + if (INTEL_INFO(dev_priv)->gen != 5)
> return;
>
> spin_lock_irq(&mchdev_lock);
> @@ -5948,10 +5903,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
>
> unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
> {
> - struct drm_device *dev = dev_priv->dev;
> unsigned long val;
>
> - if (INTEL_INFO(dev)->gen != 5)
> + if (INTEL_INFO(dev_priv)->gen != 5)
> return 0;
>
> spin_lock_irq(&mchdev_lock);
> @@ -6140,9 +6094,8 @@ void intel_gpu_ips_teardown(void)
> spin_unlock_irq(&mchdev_lock);
> }
>
> -static void intel_init_emon(struct drm_device *dev)
> +static void intel_init_emon(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> u32 lcfuse;
> u8 pxw[16];
> int i;
> @@ -6211,10 +6164,8 @@ static void intel_init_emon(struct drm_device *dev)
> dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
> }
>
> -void intel_init_gt_powersave(struct drm_device *dev)
> +void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> /*
> * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
> * requirement.
> @@ -6224,20 +6175,18 @@ void intel_init_gt_powersave(struct drm_device *dev)
> intel_runtime_pm_get(dev_priv);
> }
>
> - if (IS_CHERRYVIEW(dev))
> - cherryview_init_gt_powersave(dev);
> - else if (IS_VALLEYVIEW(dev))
> - valleyview_init_gt_powersave(dev);
> + if (IS_CHERRYVIEW(dev_priv))
> + cherryview_init_gt_powersave(dev_priv);
> + else if (IS_VALLEYVIEW(dev_priv))
> + valleyview_init_gt_powersave(dev_priv);
> }
>
> -void intel_cleanup_gt_powersave(struct drm_device *dev)
> +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> - if (IS_CHERRYVIEW(dev))
> + if (IS_CHERRYVIEW(dev_priv))
> return;
> - else if (IS_VALLEYVIEW(dev))
> - valleyview_cleanup_gt_powersave(dev);
> + else if (IS_VALLEYVIEW(dev_priv))
> + valleyview_cleanup_gt_powersave(dev_priv);
>
> if (!i915.enable_rc6)
> intel_runtime_pm_put(dev_priv);
> @@ -6252,16 +6201,14 @@ static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
>
> /**
> * intel_suspend_gt_powersave - suspend PM work and helper threads
> - * @dev: drm device
> + * @dev_priv: i915 device
> *
> * We don't want to disable RC6 or other features here, we just want
> * to make sure any work we've queued has finished and won't bother
> * us while we're suspended.
> */
> -void intel_suspend_gt_powersave(struct drm_device *dev)
> +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> if (INTEL_GEN(dev_priv) < 6)
> return;
>
> @@ -6271,25 +6218,23 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
> gen6_rps_idle(dev_priv);
> }
>
> -void intel_disable_gt_powersave(struct drm_device *dev)
> +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> - if (IS_IRONLAKE_M(dev)) {
> + if (IS_IRONLAKE_M(dev_priv)) {
> ironlake_disable_drps(dev_priv);
> - } else if (INTEL_INFO(dev)->gen >= 6) {
> - intel_suspend_gt_powersave(dev);
> + } else if (INTEL_INFO(dev_priv)->gen >= 6) {
> + intel_suspend_gt_powersave(dev_priv);
>
> mutex_lock(&dev_priv->rps.hw_lock);
> - if (INTEL_INFO(dev)->gen >= 9) {
> - gen9_disable_rc6(dev);
> - gen9_disable_rps(dev);
> - } else if (IS_CHERRYVIEW(dev))
> - cherryview_disable_rps(dev);
> - else if (IS_VALLEYVIEW(dev))
> - valleyview_disable_rps(dev);
> + if (INTEL_INFO(dev_priv)->gen >= 9) {
> + gen9_disable_rc6(dev_priv);
> + gen9_disable_rps(dev_priv);
> + } else if (IS_CHERRYVIEW(dev_priv))
> + cherryview_disable_rps(dev_priv);
> + else if (IS_VALLEYVIEW(dev_priv))
> + valleyview_disable_rps(dev_priv);
> else
> - gen6_disable_rps(dev);
> + gen6_disable_rps(dev_priv);
>
> dev_priv->rps.enabled = false;
> mutex_unlock(&dev_priv->rps.hw_lock);
> @@ -6301,27 +6246,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
> struct drm_i915_private *dev_priv =
> container_of(work, struct drm_i915_private,
> rps.delayed_resume_work.work);
> - struct drm_device *dev = dev_priv->dev;
>
> mutex_lock(&dev_priv->rps.hw_lock);
>
> - gen6_reset_rps_interrupts(dev);
> + gen6_reset_rps_interrupts(dev_priv);
>
> - if (IS_CHERRYVIEW(dev)) {
> - cherryview_enable_rps(dev);
> - } else if (IS_VALLEYVIEW(dev)) {
> - valleyview_enable_rps(dev);
> - } else if (INTEL_INFO(dev)->gen >= 9) {
> - gen9_enable_rc6(dev);
> - gen9_enable_rps(dev);
> - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
> - __gen6_update_ring_freq(dev);
> - } else if (IS_BROADWELL(dev)) {
> - gen8_enable_rps(dev);
> - __gen6_update_ring_freq(dev);
> + if (IS_CHERRYVIEW(dev_priv)) {
> + cherryview_enable_rps(dev_priv);
> + } else if (IS_VALLEYVIEW(dev_priv)) {
> + valleyview_enable_rps(dev_priv);
> + } else if (INTEL_INFO(dev_priv)->gen >= 9) {
> + gen9_enable_rc6(dev_priv);
> + gen9_enable_rps(dev_priv);
> + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
> + __gen6_update_ring_freq(dev_priv);
> + } else if (IS_BROADWELL(dev_priv)) {
> + gen8_enable_rps(dev_priv);
> + __gen6_update_ring_freq(dev_priv);
> } else {
> - gen6_enable_rps(dev);
> - __gen6_update_ring_freq(dev);
> + gen6_enable_rps(dev_priv);
> + __gen6_update_ring_freq(dev_priv);
> }
>
> WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
> @@ -6339,20 +6283,18 @@ static void intel_gen6_powersave_work(struct work_struct *work)
> intel_runtime_pm_put(dev_priv);
> }
>
> -void intel_enable_gt_powersave(struct drm_device *dev)
> +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> /* Powersaving is controlled by the host when inside a VM */
> if (intel_vgpu_active(dev_priv))
> return;
>
> - if (IS_IRONLAKE_M(dev)) {
> + if (IS_IRONLAKE_M(dev_priv)) {
> ironlake_enable_drps(dev_priv);
> - mutex_lock(&dev->struct_mutex);
> - intel_init_emon(dev);
> - mutex_unlock(&dev->struct_mutex);
> - } else if (INTEL_INFO(dev)->gen >= 6) {
> + mutex_lock(&dev_priv->dev->struct_mutex);
> + intel_init_emon(dev_priv);
> + mutex_unlock(&dev_priv->dev->struct_mutex);
> + } else if (INTEL_INFO(dev_priv)->gen >= 6) {
> /*
> * PCU communication is slow and this doesn't need to be
> * done at any specific time, so do this out of our fast path
> @@ -6371,11 +6313,9 @@ void intel_enable_gt_powersave(struct drm_device *dev)
> }
> }
>
> -void intel_reset_gt_powersave(struct drm_device *dev)
> +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> - if (INTEL_INFO(dev)->gen < 6)
> + if (INTEL_INFO(dev_priv)->gen < 6)
> return;
>
> gen6_suspend_rps(dev_priv);
> diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
> index 4ea2bf2c2a4a..0c48af2f1b7e 100644
> --- a/drivers/gpu/drm/i915/intel_uncore.c
> +++ b/drivers/gpu/drm/i915/intel_uncore.c
> @@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
> return HRTIMER_NORESTART;
> }
>
> -void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
> +void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
> + bool restore)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> unsigned long irqflags;
> struct intel_uncore_forcewake_domain *domain;
> int retry_count = 100;
> @@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
> if (fw)
> dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
>
> - if (IS_GEN6(dev) || IS_GEN7(dev))
> + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
> dev_priv->uncore.fifo_count =
> fifo_free_entries(dev_priv);
> }
> @@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
> return false;
> }
>
> -static void __intel_uncore_early_sanitize(struct drm_device *dev,
> +static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
> bool restore_forcewake)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> /* clear out unclaimed reg detection bit */
> if (check_for_unclaimed_mmio(dev_priv))
> DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
>
> /* clear out old GT FIFO errors */
> - if (IS_GEN6(dev) || IS_GEN7(dev))
> + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
> __raw_i915_write32(dev_priv, GTFIFODBG,
> __raw_i915_read32(dev_priv, GTFIFODBG));
>
> /* WaDisableShadowRegForCpd:chv */
> - if (IS_CHERRYVIEW(dev)) {
> + if (IS_CHERRYVIEW(dev_priv)) {
> __raw_i915_write32(dev_priv, GTFIFOCTL,
> __raw_i915_read32(dev_priv, GTFIFOCTL) |
> GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
> GT_FIFO_CTL_RC6_POLICY_STALL);
> }
>
> - intel_uncore_forcewake_reset(dev, restore_forcewake);
> + intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
> }
>
> -void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
> +void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
> + bool restore_forcewake)
> {
> - __intel_uncore_early_sanitize(dev, restore_forcewake);
> - i915_check_and_clear_faults(dev);
> + __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
> + i915_check_and_clear_faults(dev_priv);
> }
>
> -void intel_uncore_sanitize(struct drm_device *dev)
> +void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
> {
> - i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
> + i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
>
> /* BIOS often leaves RC6 enabled, but disable it for hw init */
> - intel_disable_gt_powersave(dev);
> + intel_disable_gt_powersave(dev_priv);
> }
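(Not part of the patch, just to illustrate the boundary after this change:
a caller that still only holds a struct drm_device would derive the native
pointer once and pass it on, e.g.

	/* illustrative only; "example_sanitize" is a made-up caller */
	static void example_sanitize(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);

		intel_uncore_sanitize(dev_priv);
	}

which keeps the drm_device -> dev_priv lookup at the edges rather than
inside every uncore routine.)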
>
> static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
> @@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
> fw_domain_reset(d);
> }
>
> -static void intel_uncore_fw_domains_init(struct drm_device *dev)
> +static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> if (INTEL_INFO(dev_priv)->gen <= 5)
> return;
>
> - if (IS_GEN9(dev)) {
> + if (IS_GEN9(dev_priv)) {
> dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
> dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
> fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
> @@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
> FORCEWAKE_ACK_BLITTER_GEN9);
> fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
> FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
> - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
> + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
> dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
> - if (!IS_CHERRYVIEW(dev))
> + if (!IS_CHERRYVIEW(dev_priv))
> dev_priv->uncore.funcs.force_wake_put =
> fw_domains_put_with_fifo;
> else
> @@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
> FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
> fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
> FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
> - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
> + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
> dev_priv->uncore.funcs.force_wake_get =
> fw_domains_get_with_thread_status;
> - if (IS_HASWELL(dev))
> + if (IS_HASWELL(dev_priv))
> dev_priv->uncore.funcs.force_wake_put =
> fw_domains_put_with_fifo;
> else
> dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
> fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
> FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
> - } else if (IS_IVYBRIDGE(dev)) {
> + } else if (IS_IVYBRIDGE(dev_priv)) {
> u32 ecobus;
>
> /* IVB configs may use multi-threaded forcewake */
> @@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
> fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
> FORCEWAKE_MT, FORCEWAKE_MT_ACK);
>
> - mutex_lock(&dev->struct_mutex);
> fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
> ecobus = __raw_i915_read32(dev_priv, ECOBUS);
> fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
> - mutex_unlock(&dev->struct_mutex);
>
> if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
> DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
> @@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
> fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
> FORCEWAKE, FORCEWAKE_ACK);
> }
> - } else if (IS_GEN6(dev)) {
> + } else if (IS_GEN6(dev_priv)) {
> dev_priv->uncore.funcs.force_wake_get =
> fw_domains_get_with_thread_status;
> dev_priv->uncore.funcs.force_wake_put =
> @@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
> WARN_ON(dev_priv->uncore.fw_domains == 0);
> }
>
> -void intel_uncore_init(struct drm_device *dev)
> +void intel_uncore_init(struct drm_i915_private *dev_priv)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> - i915_check_vgpu(dev);
> + i915_check_vgpu(dev_priv);
>
> intel_uncore_edram_detect(dev_priv);
> - intel_uncore_fw_domains_init(dev);
> - __intel_uncore_early_sanitize(dev, false);
> + intel_uncore_fw_domains_init(dev_priv);
> + __intel_uncore_early_sanitize(dev_priv, false);
>
> dev_priv->uncore.unclaimed_mmio_check = 1;
>
> - switch (INTEL_INFO(dev)->gen) {
> + switch (INTEL_INFO(dev_priv)->gen) {
> default:
> case 9:
> ASSIGN_WRITE_MMIO_VFUNCS(gen9);
> ASSIGN_READ_MMIO_VFUNCS(gen9);
> break;
> case 8:
> - if (IS_CHERRYVIEW(dev)) {
> + if (IS_CHERRYVIEW(dev_priv)) {
> ASSIGN_WRITE_MMIO_VFUNCS(chv);
> ASSIGN_READ_MMIO_VFUNCS(chv);
>
> @@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev)
> break;
> case 7:
> case 6:
> - if (IS_HASWELL(dev)) {
> + if (IS_HASWELL(dev_priv)) {
> ASSIGN_WRITE_MMIO_VFUNCS(hsw);
> } else {
> ASSIGN_WRITE_MMIO_VFUNCS(gen6);
> }
>
> - if (IS_VALLEYVIEW(dev)) {
> + if (IS_VALLEYVIEW(dev_priv)) {
> ASSIGN_READ_MMIO_VFUNCS(vlv);
> } else {
> ASSIGN_READ_MMIO_VFUNCS(gen6);
> @@ -1386,16 +1379,16 @@ void intel_uncore_init(struct drm_device *dev)
> ASSIGN_READ_MMIO_VFUNCS(vgpu);
> }
>
> - i915_check_and_clear_faults(dev);
> + i915_check_and_clear_faults(dev_priv);
> }
> #undef ASSIGN_WRITE_MMIO_VFUNCS
> #undef ASSIGN_READ_MMIO_VFUNCS
>
> -void intel_uncore_fini(struct drm_device *dev)
> +void intel_uncore_fini(struct drm_i915_private *dev_priv)
> {
> /* Paranoia: make sure we have disabled everything before we exit. */
> - intel_uncore_sanitize(dev);
> - intel_uncore_forcewake_reset(dev, false);
> + intel_uncore_sanitize(dev_priv);
> + intel_uncore_forcewake_reset(dev_priv, false);
> }
>
> #define GEN_RANGE(l, h) GENMASK(h, l)
> @@ -1506,44 +1499,47 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
> return 0;
> }
>
> -static int i915_reset_complete(struct drm_device *dev)
> +static int i915_reset_complete(struct pci_dev *pdev)
> {
> u8 gdrst;
> - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
> + pci_read_config_byte(pdev, I915_GDRST, &gdrst);
> return (gdrst & GRDOM_RESET_STATUS) == 0;
> }
>
> -static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
> +static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
> {
> + struct pci_dev *pdev = dev_priv->dev->pdev;
> +
> /* assert reset for at least 20 usec */
> - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
> + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
> udelay(20);
> - pci_write_config_byte(dev->pdev, I915_GDRST, 0);
> + pci_write_config_byte(pdev, I915_GDRST, 0);
>
> - return wait_for(i915_reset_complete(dev), 500);
> + return wait_for(i915_reset_complete(pdev), 500);
> }
>
> -static int g4x_reset_complete(struct drm_device *dev)
> +static int g4x_reset_complete(struct pci_dev *pdev)
> {
> u8 gdrst;
> - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
> + pci_read_config_byte(pdev, I915_GDRST, &gdrst);
> return (gdrst & GRDOM_RESET_ENABLE) == 0;
> }
>
> -static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
> +static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
> {
> - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
> - return wait_for(g4x_reset_complete(dev), 500);
> + struct pci_dev *pdev = dev_priv->dev->pdev;
> + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
> + return wait_for(g4x_reset_complete(pdev), 500);
> }
>
> -static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
> +static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> + struct pci_dev *pdev = dev_priv->dev->pdev;
> int ret;
>
> - pci_write_config_byte(dev->pdev, I915_GDRST,
> + pci_write_config_byte(pdev, I915_GDRST,
> GRDOM_RENDER | GRDOM_RESET_ENABLE);
> - ret = wait_for(g4x_reset_complete(dev), 500);
> + ret = wait_for(g4x_reset_complete(pdev), 500);
> if (ret)
> return ret;
>
> @@ -1551,9 +1547,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
> I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
> POSTING_READ(VDECCLK_GATE_D);
>
> - pci_write_config_byte(dev->pdev, I915_GDRST,
> + pci_write_config_byte(pdev, I915_GDRST,
> GRDOM_MEDIA | GRDOM_RESET_ENABLE);
> - ret = wait_for(g4x_reset_complete(dev), 500);
> + ret = wait_for(g4x_reset_complete(pdev), 500);
> if (ret)
> return ret;
>
> @@ -1561,14 +1557,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
> I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
> POSTING_READ(VDECCLK_GATE_D);
>
> - pci_write_config_byte(dev->pdev, I915_GDRST, 0);
> + pci_write_config_byte(pdev, I915_GDRST, 0);
>
> return 0;
> }
>
> -static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
> +static int ironlake_do_reset(struct drm_i915_private *dev_priv,
> + unsigned engine_mask)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> int ret;
>
> I915_WRITE(ILK_GDSR,
> @@ -1612,7 +1608,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
>
> /**
> * gen6_reset_engines - reset individual engines
> - * @dev: DRM device
> + * @dev_priv: i915 device
> * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
> *
> * This function will reset the individual engines that are set in engine_mask.
> @@ -1623,9 +1619,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
> *
> * Returns 0 on success, nonzero on error.
> */
> -static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
> +static int gen6_reset_engines(struct drm_i915_private *dev_priv,
> + unsigned engine_mask)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
> const u32 hw_engine_mask[I915_NUM_ENGINES] = {
> [RCS] = GEN6_GRDOM_RENDER,
> @@ -1647,7 +1643,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
>
> ret = gen6_hw_domain_reset(dev_priv, hw_mask);
>
> - intel_uncore_forcewake_reset(dev, true);
> + intel_uncore_forcewake_reset(dev_priv, true);
>
> return ret;
> }
> @@ -1688,16 +1684,16 @@ static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
> _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
> }
>
> -static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
> +static int gen8_reset_engines(struct drm_i915_private *dev_priv,
> + unsigned engine_mask)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_engine_cs *engine;
>
> for_each_engine_masked(engine, dev_priv, engine_mask)
> if (gen8_request_engine_reset(engine))
> goto not_ready;
>
> - return gen6_reset_engines(dev, engine_mask);
> + return gen6_reset_engines(dev_priv, engine_mask);
>
> not_ready:
> for_each_engine_masked(engine, dev_priv, engine_mask)
> @@ -1706,35 +1702,35 @@ not_ready:
> return -EIO;
> }
>
> -static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
> - unsigned engine_mask)
> +typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
> +
> +static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
> {
> if (!i915.reset)
> return NULL;
>
> - if (INTEL_INFO(dev)->gen >= 8)
> + if (INTEL_INFO(dev_priv)->gen >= 8)
> return gen8_reset_engines;
> - else if (INTEL_INFO(dev)->gen >= 6)
> + else if (INTEL_INFO(dev_priv)->gen >= 6)
> return gen6_reset_engines;
> - else if (IS_GEN5(dev))
> + else if (IS_GEN5(dev_priv))
> return ironlake_do_reset;
> - else if (IS_G4X(dev))
> + else if (IS_G4X(dev_priv))
> return g4x_do_reset;
> - else if (IS_G33(dev))
> + else if (IS_G33(dev_priv))
> return g33_do_reset;
> - else if (INTEL_INFO(dev)->gen >= 3)
> + else if (INTEL_INFO(dev_priv)->gen >= 3)
> return i915_do_reset;
> else
> return NULL;
> }
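For comparison, without the reset_func typedef the converted declaration
would have had to read roughly

	/* equivalent declaration without the typedef, illustration only */
	static int (*intel_get_gpu_reset(struct drm_i915_private *dev_priv))
		(struct drm_i915_private *, unsigned engine_mask);

so the typedef is a readability win on top of the type change.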
>
> -int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
> +int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
> {
> - struct drm_i915_private *dev_priv = to_i915(dev);
> - int (*reset)(struct drm_device *, unsigned);
> + reset_func reset;
> int ret;
>
> - reset = intel_get_gpu_reset(dev);
> + reset = intel_get_gpu_reset(dev_priv);
> if (reset == NULL)
> return -ENODEV;
>
> @@ -1742,15 +1738,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
> * request may be dropped and never completes (causing -EIO).
> */
> intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> - ret = reset(dev, engine_mask);
> + ret = reset(dev_priv, engine_mask);
> intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>
> return ret;
> }
>
> -bool intel_has_gpu_reset(struct drm_device *dev)
> +bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
> {
> - return intel_get_gpu_reset(dev) != NULL;
> + return intel_get_gpu_reset(dev_priv) != NULL;
> }
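(Again purely as an illustration of the new entry points, not something in
the patch: a caller-side sketch would now look like

	/* hypothetical helper, names invented for illustration */
	static int example_try_reset(struct drm_i915_private *dev_priv,
				     unsigned engine_mask)
	{
		if (!intel_has_gpu_reset(dev_priv))
			return -ENODEV;

		return intel_gpu_reset(dev_priv, engine_mask);
	}

with no drm_device needed anywhere in the reset path.)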
>
> int intel_guc_reset(struct drm_i915_private *dev_priv)
--
Joonas Lahtinen
Open Source Technology Center
Intel Corporation