[Intel-gfx] [PATCH 04/41] drm/i915: Remove RPM sequence checking
Imre Deak
imre.deak at intel.com
Fri Oct 21 10:19:32 UTC 2016
On Thu, 2016-10-20 at 16:03 +0100, Chris Wilson wrote:
> We only used the RPM sequence checking inside the low-level GTT
> accessors, when we had to rely on callers taking the wakeref on our
> behalf. Now that we take the RPM wakeref inside the GTT management
> routines themselves, we can forgo the sanity check of the callers.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Imre Deak <imre.deak at intel.com>
Reviewed-by: Imre Deak <imre.deak at intel.com>
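
For readers following along, the convention the patch relies on is that
the wakeref is now acquired by the GTT management routine itself rather
than merely asserted inside each low-level accessor. A minimal sketch of
the before/after pattern (simplified; write_pte() is a hypothetical
placeholder, not the exact driver code):

	/* Before: the accessor could only sanity-check its caller. */
	static void insert_page_checked(struct drm_i915_private *dev_priv)
	{
		/* caller was required to hold the wakeref for us */
		int seq = assert_rpm_atomic_begin(dev_priv);

		write_pte(dev_priv);	/* touch the hardware */

		/* verify no runtime suspend/resume happened in between */
		assert_rpm_atomic_end(dev_priv, seq);
	}

	/* After: the management routine owns the wakeref outright. */
	static void bind_vma_with_wakeref(struct drm_i915_private *dev_priv)
	{
		intel_runtime_pm_get(dev_priv);	/* device guaranteed awake */
		write_pte(dev_priv);		/* accessor needs no extra checks */
		intel_runtime_pm_put(dev_priv);
	}

The hardware access itself is unchanged; only the wakeref bookkeeping
moves up a level, which is why the atomic_seq machinery can go.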
> ---
> drivers/gpu/drm/i915/i915_drv.h | 1 -
> drivers/gpu/drm/i915/i915_gem_gtt.c | 55 +--------------------------------
> drivers/gpu/drm/i915/intel_drv.h | 17 ----------
> drivers/gpu/drm/i915/intel_pm.c | 1 -
> drivers/gpu/drm/i915/intel_runtime_pm.c | 3 +-
> 5 files changed, 2 insertions(+), 75 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f4af40452dfd..c388361ad717 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1687,7 +1687,6 @@ struct skl_wm_level {
> */
> struct i915_runtime_pm {
> atomic_t wakeref_count;
> - atomic_t atomic_seq;
> bool suspended;
> bool irqs_enabled;
> };
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 33036359c170..947d5ad51fb7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2395,16 +2395,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
> gen8_pte_t __iomem *pte =
> (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
> (offset >> PAGE_SHIFT);
> - int rpm_atomic_seq;
> -
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>
> gen8_set_pte(pte, gen8_pte_encode(addr, level));
>
> I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
> POSTING_READ(GFX_FLSH_CNTL_GEN6);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> }
>
> static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
> @@ -2418,11 +2413,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
> gen8_pte_t __iomem *gtt_entries;
> gen8_pte_t gtt_entry;
> dma_addr_t addr;
> - int rpm_atomic_seq;
> int i = 0;
>
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
> -
> gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
>
> for_each_sgt_dma(addr, sgt_iter, st) {
> @@ -2446,8 +2438,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
> */
> I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
> POSTING_READ(GFX_FLSH_CNTL_GEN6);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> }
>
> struct insert_entries {
> @@ -2486,16 +2476,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
> gen6_pte_t __iomem *pte =
> (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
> (offset >> PAGE_SHIFT);
> - int rpm_atomic_seq;
> -
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>
> iowrite32(vm->pte_encode(addr, level, flags), pte);
>
> I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
> POSTING_READ(GFX_FLSH_CNTL_GEN6);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> }
>
> /*
> @@ -2515,11 +2500,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
> gen6_pte_t __iomem *gtt_entries;
> gen6_pte_t gtt_entry;
> dma_addr_t addr;
> - int rpm_atomic_seq;
> int i = 0;
>
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
> -
> gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
>
> for_each_sgt_dma(addr, sgt_iter, st) {
> @@ -2542,8 +2524,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
> */
> I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
> POSTING_READ(GFX_FLSH_CNTL_GEN6);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> }
>
> static void nop_clear_range(struct i915_address_space *vm,
> @@ -2554,7 +2534,6 @@ static void nop_clear_range(struct i915_address_space *vm,
> static void gen8_ggtt_clear_range(struct i915_address_space *vm,
> uint64_t start, uint64_t length)
> {
> - struct drm_i915_private *dev_priv = to_i915(vm->dev);
> struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
> unsigned first_entry = start >> PAGE_SHIFT;
> unsigned num_entries = length >> PAGE_SHIFT;
> @@ -2562,9 +2541,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
> (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
> const int max_entries = ggtt_total_entries(ggtt) - first_entry;
> int i;
> - int rpm_atomic_seq;
> -
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>
> if (WARN(num_entries > max_entries,
> "First entry = %d; Num entries = %d (max=%d)\n",
> @@ -2576,15 +2552,12 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
> for (i = 0; i < num_entries; i++)
> gen8_set_pte(&gtt_base[i], scratch_pte);
> readl(gtt_base);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> }
>
> static void gen6_ggtt_clear_range(struct i915_address_space *vm,
> uint64_t start,
> uint64_t length)
> {
> - struct drm_i915_private *dev_priv = to_i915(vm->dev);
> struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
> unsigned first_entry = start >> PAGE_SHIFT;
> unsigned num_entries = length >> PAGE_SHIFT;
> @@ -2592,9 +2565,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
> (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
> const int max_entries = ggtt_total_entries(ggtt) - first_entry;
> int i;
> - int rpm_atomic_seq;
> -
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>
> if (WARN(num_entries > max_entries,
> "First entry = %d; Num entries = %d (max=%d)\n",
> @@ -2607,8 +2577,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
> for (i = 0; i < num_entries; i++)
> iowrite32(scratch_pte, &gtt_base[i]);
> readl(gtt_base);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> }
>
> static void i915_ggtt_insert_page(struct i915_address_space *vm,
> @@ -2617,16 +2585,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
> enum i915_cache_level cache_level,
> u32 unused)
> {
> - struct drm_i915_private *dev_priv = to_i915(vm->dev);
> unsigned int flags = (cache_level == I915_CACHE_NONE) ?
> AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
> - int rpm_atomic_seq;
> -
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>
> intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> }
>
> static void i915_ggtt_insert_entries(struct i915_address_space *vm,
> @@ -2634,33 +2596,18 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
> uint64_t start,
> enum i915_cache_level cache_level, u32 unused)
> {
> - struct drm_i915_private *dev_priv = to_i915(vm->dev);
> unsigned int flags = (cache_level == I915_CACHE_NONE) ?
> AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
> - int rpm_atomic_seq;
> -
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>
> intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
>
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> -
> }
>
> static void i915_ggtt_clear_range(struct i915_address_space *vm,
> uint64_t start,
> uint64_t length)
> {
> - struct drm_i915_private *dev_priv = to_i915(vm->dev);
> - unsigned first_entry = start >> PAGE_SHIFT;
> - unsigned num_entries = length >> PAGE_SHIFT;
> - int rpm_atomic_seq;
> -
> - rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
> -
> - intel_gtt_clear_range(first_entry, num_entries);
> -
> - assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
> + intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
> }
>
> static int ggtt_bind_vma(struct i915_vma *vma,
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index c06a33e0ff19..95a7d3005a74 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -1664,23 +1664,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
> DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
> }
>
> -static inline int
> -assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
> -{
> - int seq = atomic_read(&dev_priv->pm.atomic_seq);
> -
> - assert_rpm_wakelock_held(dev_priv);
> -
> - return seq;
> -}
> -
> -static inline void
> -assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
> -{
> - WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
> - "HW access outside of RPM atomic section\n");
> -}
> -
> /**
> * disable_rpm_wakeref_asserts - disable the RPM assert checks
> * @dev_priv: i915 device instance
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 0a9e7f2045d4..f212b71f5bdb 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -8047,5 +8047,4 @@ void intel_pm_setup(struct drm_device *dev)
>
> dev_priv->pm.suspended = false;
> atomic_set(&dev_priv->pm.wakeref_count, 0);
> - atomic_set(&dev_priv->pm.atomic_seq, 0);
> }
> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
> index ee56a8756c07..82edba2f3589 100644
> --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> @@ -2736,8 +2736,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
> struct device *kdev = &pdev->dev;
>
> assert_rpm_wakelock_held(dev_priv);
> - if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
> - atomic_inc(&dev_priv->pm.atomic_seq);
> + atomic_dec(&dev_priv->pm.wakeref_count);
>
> pm_runtime_mark_last_busy(kdev);
> pm_runtime_put_autosuspend(kdev);