[openchrome-devel] drm-openchrome: Branch 'drm-next-6.1' - 114 commits - drivers/gpu/drm

Kevin Brace kevinbrace at kemper.freedesktop.org
Sat Oct 15 00:06:19 UTC 2022


 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c                         |    6 
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c                        |   14 
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c                         |    5 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c                            |    8 
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c                           |   29 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h                           |    2 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c                            |   17 
 drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h                            |    7 
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c                              |    6 
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c                             |    6 
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c                             |    6 
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c                             |   29 -
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c                             |   11 
 drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c                             |   15 
 drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c                             |   17 
 drivers/gpu/drm/amd/amdgpu/si_dma.c                                |    5 
 drivers/gpu/drm/amd/amdgpu/soc21.c                                 |    1 
 drivers/gpu/drm/amd/amdgpu/umc_v6_1.c                              |   10 
 drivers/gpu/drm/amd/amdgpu/umc_v6_7.c                              |  165 ++----
 drivers/gpu/drm/amd/amdgpu/umc_v8_10.c                             |   78 ++-
 drivers/gpu/drm/amd/amdgpu/umc_v8_7.c                              |   63 +-
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c                   |    3 
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c                  |   71 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c              |    8 
 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c                 |    7 
 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c       |    4 
 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c         |   11 
 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c       |   85 ++-
 drivers/gpu/drm/amd/display/dc/core/dc.c                           |  105 ++++
 drivers/gpu/drm/amd/display/dc/core/dc_link.c                      |   11 
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c                   |   70 +-
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c                  |   53 ++
 drivers/gpu/drm/amd/display/dc/core/dc_stream.c                    |    8 
 drivers/gpu/drm/amd/display/dc/dc.h                                |    8 
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c                       |  147 ++++++
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h                       |    1 
 drivers/gpu/drm/amd/display/dc/dc_link.h                           |    4 
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c                       |   13 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c                   |    1 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c          |  239 ++--------
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c                  |   40 -
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h                  |    1 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c              |   66 ++
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c                  |   30 +
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c                 |   25 -
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c                  |    1 
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c                |    8 
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c              |   13 
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c                   |    4 
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c                  |    3 
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c              |    4 
 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c            |    2 
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c |   20 
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c                  |    2 
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c              |   15 
 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c  |    2 
 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c            |   16 
 drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c            |   15 
 drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c            |   13 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c      |    7 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h      |    4 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c    |   57 +-
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h    |   14 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h   |    1 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c                |    1 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c                  |    6 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c                 |   42 +
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c                  |    2 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c              |   31 +
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h              |   22 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c      |   88 +++
 drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c    |    1 
 drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c            |    6 
 drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c               |  118 ++--
 drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c               |   96 +---
 drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h               |    1 
 drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c     |   15 
 drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c               |  131 ++---
 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c     |   21 
 drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c              |    1 
 drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h              |    1 
 drivers/gpu/drm/amd/display/dc/inc/core_types.h                    |    6 
 drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h                     |   19 
 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h                    |   15 
 drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h           |   99 ++++
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h                        |    4 
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h                       |    5 
 drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h           |    1 
 drivers/gpu/drm/amd/display/dc/inc/resource.h                      |    6 
 drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c             |    2 
 drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c         |    2 
 drivers/gpu/drm/amd/display/dmub/dmub_srv.h                        |    1 
 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h                    |  140 +++++
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c                  |    1 
 drivers/gpu/drm/amd/display/modules/color/color_gamma.c            |    2 
 drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h       |    2 
 drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h      |    3 
 drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c                         |    5 
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c                |    2 
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c              |    2 
 drivers/gpu/drm/i915/display/g4x_hdmi.c                            |    2 
 drivers/gpu/drm/i915/display/intel_display.c                       |   18 
 drivers/gpu/drm/i915/display/intel_fb_pin.c                        |   62 +-
 drivers/gpu/drm/i915/display/intel_psr.c                           |   78 +--
 drivers/gpu/drm/i915/display/skl_watermark.c                       |   16 
 drivers/gpu/drm/i915/gem/i915_gem_context.c                        |    8 
 drivers/gpu/drm/i915/gem/i915_gem_object.c                         |   37 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h                         |    4 
 drivers/gpu/drm/i915/gem/i915_gem_object_types.h                   |    3 
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c                            |    5 
 drivers/gpu/drm/i915/gt/intel_context.c                            |    5 
 drivers/gpu/drm/i915/gt/intel_context.h                            |    3 
 drivers/gpu/drm/i915/gt/intel_ggtt.c                               |    8 
 drivers/gpu/drm/i915/gt/intel_mocs.c                               |    8 
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c                  |   26 -
 drivers/gpu/drm/i915/i915_reg.h                                    |   16 
 116 files changed, 1830 insertions(+), 1081 deletions(-)

New commits:
commit d2001eb17199a93e4f03c457a7796b608f64c9c9
Merge: be32b28967f4 fc3523a833c9
Author: Kevin Brace <kevinbrace at bracecomputerlab.com>
Date:   Fri Oct 14 18:00:12 2022 -0500

    Merge tag 'drm-next-2022-10-14' of git://anongit.freedesktop.org/drm/drm into drm-next-6.1
    
    drm fixes for 6.1-rc1
    
    amdgpu:
    - DC mutex fix
    - DC SubVP fixes
    - DCN 3.2.x fixes
    - DCN 3.1.x fixes
    - SDMA 6.x fixes
    - Enable DPIA for 3.1.4
    - VRR fixes
    - VRAM BO swapping fix
    - Revert dirty fb helper change
    - SR-IOV suspend/resume fixes
    - Work around GCC array bounds check fail warning
    - UMC 8.10 fixes
    - Misc fixes and cleanups
    
    i915:
    - Round to closest in g4x+ HDMI clock readout
    - Update MOCS table for EHL
    - Fix PSR_IMR/IIR field handling
    - Fix watermark calculations for gen12+/DG2 modifiers
    - Reject excessive dotclocks early
    - Fix revocation of non-persistent contexts
    - Handle migration for dpt
    - Fix display problems after resume
    - Allow control over the flags when migrating
    - Consider DG2_RC_CCS_CC when migrating buffers

commit fc3523a833c9c109e68209f1ecdd15864373e66a
Merge: e55978a4f264 0811b9e4530d
Author: Dave Airlie <airlied at redhat.com>
Date:   Fri Oct 14 07:47:25 2022 +1000

    Merge tag 'amd-drm-fixes-6.1-2022-10-12' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
    
    amd-drm-fixes-6.1-2022-10-12:
    
    amdgpu:
    - DC mutex fix
    - DC SubVP fixes
    - DCN 3.2.x fixes
    - DCN 3.1.x fixes
    - SDMA 6.x fixes
    - Enable DPIA for 3.1.4
    - VRR fixes
    - VRAM BO swapping fix
    - Revert dirty fb helper change
    - SR-IOV suspend/resume fixes
    - Work around GCC array bounds check fail warning
    - UMC 8.10 fixes
    - Misc fixes and cleanups
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    From: Alex Deucher <alexander.deucher at amd.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221012162650.8810-1-alexander.deucher@amd.com

commit e55978a4f264676dfbbd5d955c88a60c6eb3aa7a
Merge: d6fe5887ca89 ea19684afb54
Author: Dave Airlie <airlied at redhat.com>
Date:   Fri Oct 14 07:46:50 2022 +1000

    Merge tag 'drm-intel-next-fixes-2022-10-13' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
    
    - Fix revocation of non-persistent contexts (Tvrtko Ursulin)
    - Handle migration for dpt (Matthew Auld)
    - Fix display problems after resume (Thomas Hellström)
    - Allow control over the flags when migrating (Matthew Auld)
    - Consider DG2_RC_CCS_CC when migrating buffers (Matthew Auld)
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    From: Tvrtko Ursulin <tvrtko.ursulin at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/Y0gK9QmCmktLLzqp@tursulin-desk

commit 0811b9e4530d7c46542a8993ce6b725d042c6154
Author: Aurabindo Pillai <aurabindo.pillai at amd.com>
Date:   Thu Oct 6 17:17:40 2022 -0400

    drm/amd/display: Add HUBP surface flip interrupt handler
    
    Add the hubp surface flip handler. This fixes some flip timeout issues.
    
    Acked-by: Alex Deucher <alexander.deucher at amd.com>
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index f4b901d393eb..ac1c6458dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -181,6 +181,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
 	.hubp_init = hubp3_init,
 	.set_unbounded_requesting = hubp31_set_unbounded_requesting,
 	.hubp_soft_reset = hubp31_soft_reset,
+	.hubp_set_flip_int = hubp1_set_flip_int,
 	.hubp_in_blank = hubp1_in_blank,
 	.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
 	.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
commit d6fe5887ca891f5a7a3998bcbeccd6ec2e215132
Merge: bafaf67c42f4 cdf6428dd518
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed Oct 12 14:20:59 2022 +1000

    Merge tag 'drm-intel-next-fixes-2022-10-06-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
    
    - Round to closest in g4x+ HDMI clock readout (Ville Syrjälä)
    - Update MOCS table for EHL (Tejas Upadhyay)
    - Fix PSR_IMR/IIR field handling (Jouni Högander)
    - Fix watermark calculations for gen12+ RC CCS modifier (Ville Syrjälä)
    - Fix watermark calculations for gen12+ MC CCS modifier (Ville Syrjälä)
    - Fix watermark calculations for gen12+ CCS+CC modifier (Ville Syrjälä)
    - Fix watermark calculations for DG2 CCS modifiers (Ville Syrjälä)
    - Fix watermark calculations for DG2 CCS+CC modifier (Ville Syrjälä)
    - Reject excessive dotclocks early (Ville Syrjälä)
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    From: Tvrtko Ursulin <tvrtko.ursulin at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/Yz6rkXI9HKFUvtWK@tursulin-desk

commit ea19684afb545605bbcb690c49a91ce2c8e596dd
Author: Matthew Auld <matthew.auld at intel.com>
Date:   Tue Oct 4 14:19:15 2022 +0100

    drm/i915/display: consider DG2_RC_CCS_CC when migrating buffers
    
    For these types of display buffers, we need to be able to CPU access some
    part of the backing memory in prepare_plane_clear_colors(). As a result
    we need to ensure the buffer is always placed in the mappable part of
    lmem, which becomes necessary on small-bar systems.
    
    v2(Nirmoy & Ville):
     - Add some commentary for why we need to CPU access the buffer.
     - Split out the other changes, so we just consider the display change
       here.
    v3:
     - Handle this in the dpt path.
    v4(Ville):
     - Drop the intel_fb_rc_ccs_cc_plane() sanity check in
       pin_and_fence_fb_obj(), since we can also trigger this on DG1 it
       seems.
    
    Fixes: eb1c535f0d69 ("drm/i915: turn on small BAR support")
    Reported-by: Jianshui Yu <jianshui.yu at intel.com>
    Signed-off-by: Matthew Auld <matthew.auld at intel.com>
    Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Cc: Nirmoy Das <nirmoy.das at intel.com>
    Reviewed-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Acked-by: Nirmoy Das <nirmoy.das at intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221004131916.233474-4-matthew.auld@intel.com
    (cherry picked from commit e3afc690188be8e4385d13d1b0e7f0ba01caea40)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 733972fab07f..1dddd6abd77b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -50,7 +50,18 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
 			continue;
 
 		if (HAS_LMEM(dev_priv)) {
-			ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
+			unsigned int flags = obj->flags;
+
+			/*
+			 * For this type of buffer we need to able to read from the CPU
+			 * the clear color value found in the buffer, hence we need to
+			 * ensure it is always in the mappable part of lmem, if this is
+			 * a small-bar device.
+			 */
+			if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+				flags &= ~I915_BO_ALLOC_GPU_ONLY;
+			ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+							flags);
 			if (ret)
 				continue;
 		}
commit 695ddc9318ad45b6a32f902b7c6998c65d575f26
Author: Matthew Auld <matthew.auld at intel.com>
Date:   Tue Oct 4 14:19:14 2022 +0100

    drm/i915: allow control over the flags when migrating
    
    In the next patch we want to move the object (if the current resource is
    not compatible) to the mappable part of lmem for some display buffers.
    Currently that requires being able to unset the I915_BO_ALLOC_GPU_ONLY
    hint.
    
    Signed-off-by: Matthew Auld <matthew.auld at intel.com>
    Cc: Jianshui Yu <jianshui.yu at intel.com>
    Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Cc: Nirmoy Das <nirmoy.das at intel.com>
    Reviewed-by: Nirmoy Das <nirmoy.das at intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221004131916.233474-3-matthew.auld@intel.com
    (cherry picked from commit 999f4562077208b683f0519e5f1aa1e5c2fd2191)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7ff9c7877bec..369006c5317f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -652,6 +652,41 @@ bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
 int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
 			    struct i915_gem_ww_ctx *ww,
 			    enum intel_region_id id)
+{
+	return __i915_gem_object_migrate(obj, ww, id, obj->flags);
+}
+
+/**
+ * __i915_gem_object_migrate - Migrate an object to the desired region id, with
+ * control of the extra flags
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ * @flags: The object flags. Normally just obj->flags.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned, (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			      struct i915_gem_ww_ctx *ww,
+			      enum intel_region_id id,
+			      unsigned int flags)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct intel_memory_region *mr;
@@ -672,7 +707,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
 		return 0;
 	}
 
-	return obj->ops->migrate(obj, mr);
+	return obj->ops->migrate(obj, mr, flags);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7317d4102955..1723af9b0f6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -608,6 +608,10 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
 int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
 			    struct i915_gem_ww_ctx *ww,
 			    enum intel_region_id id);
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			      struct i915_gem_ww_ctx *ww,
+			      enum intel_region_id id,
+			      unsigned int flags);
 
 bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
 				 enum intel_region_id id);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 40305e2bcd49..d0d6772e6f36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -107,7 +107,8 @@ struct drm_i915_gem_object_ops {
 	 * pinning or for as long as the object lock is held.
 	 */
 	int (*migrate)(struct drm_i915_gem_object *obj,
-		       struct intel_memory_region *mr);
+		       struct intel_memory_region *mr,
+		       unsigned int flags);
 
 	void (*release)(struct drm_i915_gem_object *obj);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e3fc38dd5db0..4f861782c3e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -848,9 +848,10 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
 }
 
 static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
-			    struct intel_memory_region *mr)
+			    struct intel_memory_region *mr,
+			    unsigned int flags)
 {
-	return __i915_ttm_migrate(obj, mr, obj->flags);
+	return __i915_ttm_migrate(obj, mr, flags);
 }
 
 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
commit 1f768ba469002d2dcad5c3d667151977417df7d9
Author: Yang Li <yang.lee at linux.alibaba.com>
Date:   Mon Oct 10 15:38:03 2022 +0800

    drm/amd/display: Simplify bool conversion
    
    The result of 'pwr_status == 0' is already Boolean, so the ternary
    expression is redundant.
    
    Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=2354
    Reported-by: Abaci Robot <abaci at linux.alibaba.com>
    Reviewed-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Yang Li <yang.lee at linux.alibaba.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 426b07edb426..cf5bd9713f54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -1400,7 +1400,7 @@ bool dcn32_dsc_pg_status(
 		break;
 	}
 
-	return pwr_status == 0 ? true : false;
+	return pwr_status == 0;
 }
 
 void dcn32_update_dsc_pg(struct dc *dc,
commit a895014853ea6df2778533e2a0bb7a0d53f02ec2
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Mon Sep 19 12:36:29 2022 -0400

    drm/amd/display: fix transfer function passed to build_coefficients()
    
    The default argument should be the enum value TRANSFER_FUNCTION_SRGB rather
    than the current boolean value, which improperly maps to
    TRANSFER_FUNCTION_BT709.
    
    Commit 9b3d76527f6e ("drm/amd/display: Revert adding degamma coefficients")
    appears to have improperly reverted
    commit d02097095916 ("drm/amd/display: Add regamma/degamma coefficients and set sRGB when TF is BT709"),
    replacing the enum value with a boolean value.
    
    Cc: Krunoslav Kovac <Krunoslav.Kovac at amd.com>
    Cc: Jaehyun Chung <jaehyun.chung at amd.com>
    Cc: Zeng Heng <zengheng4 at huawei.com>
    Fixes: 9b3d76527f6e ("drm/amd/display: Revert adding degamma coefficients")
    Reviewed-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 04f7656906ca..447a0ec9cbe2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1692,7 +1692,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
 	struct pwl_float_data_ex *rgb = rgb_regamma;
 	const struct hw_x_point *coord_x = coordinates_x;
 
-	build_coefficients(&coeff, true);
+	build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
 
 	i = 0;
 	while (i != hw_points_num + 1) {
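A quick standalone illustration of the bug class fixed above: a bool passed where
the transfer-function enum is expected silently promotes to the integer 1. The
enum ordering below is only what the commit message implies (true ends up as
TRANSFER_FUNCTION_BT709), not copied from the dc headers, and the names carry a
demo_ prefix to make that clear.

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: ordering implied by the commit message above
 * (true -> 1 -> BT709); the real enum lives in the dc headers. */
enum demo_transfer_function {
	DEMO_TRANSFER_FUNCTION_SRGB,   /* 0 */
	DEMO_TRANSFER_FUNCTION_BT709,  /* 1 */
};

static void demo_build_coefficients(enum demo_transfer_function tf)
{
	printf("building coefficients for tf=%d\n", tf);
}

int main(void)
{
	demo_build_coefficients(true);                        /* compiles, but selects BT709 (1) */
	demo_build_coefficients(DEMO_TRANSFER_FUNCTION_SRGB); /* the intended default */
	return 0;
}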
commit f00844daa5212aac609d9cb97ce5e0a74c67890a
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Mon Oct 10 17:30:08 2022 -0400

    drm/amd/display: add a license to cursor_reg_cache.h
    
    It's MIT.
    
    Fixes: b73353f7f3d434 ("drm/amd/display: Use the same cursor info across features")
    Reviewed-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
index 0e7c5880e867..45645f9fd86c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
 /* Copyright © 2022 Advanced Micro Devices, Inc. All rights reserved. */
 
 #ifndef __DAL_CURSOR_CACHE_H__
commit b1d1666276cce28743e2cf90be07182ceac14f1e
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Mon Oct 10 10:37:43 2022 -0400

    drm/amd/display: make virtual_disable_link_output static
    
    It's not used outside of virtual_link_hwss.c.  Fixes
    a -Wmissing-prototypes warning.
    
    Reviewed-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 9522fe0b36c9..4f7f99156897 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -37,7 +37,7 @@ void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
 {
 }
 
-void virtual_disable_link_output(struct dc_link *link,
+static void virtual_disable_link_output(struct dc_link *link,
 	const struct link_resource *link_res,
 	enum signal_type signal)
 {
commit e1e6889fc7b3e5152218db7d9f03c2f81569d54c
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Thu Oct 6 11:31:25 2022 -0400

    drm/amd/display: fix indentation in dc.c
    
    Fixes a warning in dc.c.
    
    Reviewed-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 660316a536f7..997ab031f816 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3526,9 +3526,9 @@ static void commit_planes_for_stream(struct dc *dc,
 
 	if (update_type != UPDATE_TYPE_FAST)
 		dc->hwss.post_unlock_program_front_end(dc, context);
-		if (update_type != UPDATE_TYPE_FAST)
-			if (dc->hwss.commit_subvp_config)
-				dc->hwss.commit_subvp_config(dc, context);
+	if (update_type != UPDATE_TYPE_FAST)
+		if (dc->hwss.commit_subvp_config)
+			dc->hwss.commit_subvp_config(dc, context);
 
 	if (update_type != UPDATE_TYPE_FAST)
 		if (dc->hwss.commit_subvp_config)
commit 9f30bf9917612b3a85cc28dc8ef98667ad5c07f8
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Tue Oct 11 09:27:29 2022 -0400

    drm/amd/display: make dcn32_split_stream_for_mpc_or_odm static
    
    It's not used outside of dcn32_fpu.c.
    
    Fixes: 20dad3813b3c15 ("drm/amd/display: Add a helper to map ODM/MPC/Multi-Plane resources")
    Reviewed-by: Harry Wentland <harry.wentland at amd.com>
    Reported-by: kernel test robot <lkp at intel.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 2a3f5a485b2b..819de0f11012 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1372,7 +1372,7 @@ static struct pipe_ctx *dcn32_find_split_pipe(
 	return pipe;
 }
 
-bool dcn32_split_stream_for_mpc_or_odm(
+static bool dcn32_split_stream_for_mpc_or_odm(
 		const struct dc *dc,
 		struct resource_context *res_ctx,
 		struct pipe_ctx *pri_pipe,
commit eff4ccd11313ecc8ec94c0f39961ffbf227a406d
Author: Yang Yingliang <yangyingliang at huawei.com>
Date:   Tue Oct 11 20:41:03 2022 +0800

    drm/amd/display: fix build error on arm64
    
    dcn20_build_mapped_resource() and dcn20_acquire_dsc() are not defined
    if CONFIG_DRM_AMD_DC_DCN is disabled.
    
    Fix the following build error on arm64:
    
      ERROR: modpost: "dcn20_build_mapped_resource" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!
      ERROR: modpost: "dcn20_acquire_dsc" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!
    
    Fixes: 20dad3813b3c ("drm/amd/display: Add a helper to map ODM/MPC/Multi-Plane resources")
    Signed-off-by: Yang Yingliang <yangyingliang at huawei.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 4a6e867369b8..fd8db482e56f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3721,12 +3721,16 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
 		else
 			sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
 		if (sec_pipe->stream->timing.flags.DSC == 1) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 			dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+#endif
 			ASSERT(sec_pipe->stream_res.dsc);
 			if (sec_pipe->stream_res.dsc == NULL)
 				return false;
 		}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 		dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
+#endif
 	}
 
 	return true;
commit 1ba25b6ff24303fac890d657ffdebf3e8db3bc25
Author: Aric Cyr <aric.cyr at amd.com>
Date:   Sun Oct 2 11:59:13 2022 -0400

    drm/amd/display: 3.2.207
    
    DC version 3.2.207 brings along the following:
    - PMFW z-state interface update
    - Cursor update refactor
    - Fixes to DSC validation, DCFCLK during Freesync, etc.
    - Code cleanup
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Aric Cyr <aric.cyr at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5d0103e20412..bfc5474c0f4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.206"
+#define DC_VER "3.2.207"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
commit 09f1ef99ce900dbc3659d478f006081c96cc977f
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 27 17:59:21 2022 -0400

    drm/amd/display: Clean some DCN32 macros
    
    Some unused macros might mislead developers during debugging and can be
    removed without any issue. This commit drops some unused references
    to SE_COMMON_MASK_SH_LIST_DCN32.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 20e5f016a45a..ecd041a446d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -95,7 +95,7 @@
 	SRI(DIG_FIFO_CTRL0, DIG, id)
 
 
-#define SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)\
+#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
 	SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
 	SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
 	SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\
@@ -247,15 +247,6 @@
 	SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
 	SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
-	SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh),\
-	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh)
-#else
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
-	SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)
-#endif
-
 void dcn32_dio_stream_encoder_construct(
 	struct dcn10_stream_encoder *enc1,
 	struct dc_context *ctx,
commit 832e72dd0d705bfcb4236bb2d561d82afe253e63
Author: Candice Li <candice.li at amd.com>
Date:   Mon Sep 26 16:21:05 2022 +0800

    drm/amdgpu: Add poison mode query for umc v8_10_0
    
    Add poison mode query support for umc v8_10_0.
    
    Signed-off-by: Candice Li <candice.li at amd.com>
    Reviewed-by: Tao Zhou <tao.zhou1 at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index 38f9e29990cc..91235df54e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -330,6 +330,31 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
 	}
 }
 
+static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
+						struct amdgpu_device *adev,
+						uint32_t umc_reg_offset)
+{
+	uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+	ecc_ctrl_addr =
+		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
+	ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+					umc_reg_offset) * 4);
+
+	return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+	uint32_t umc_reg_offset  = 0;
+
+	/* Enabling fatal error in umc node0 instance0 channel0 will be
+	 * considered as fatal error mode
+	 */
+	umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
+	return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+}
+
 const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
 	.query_ras_error_count = umc_v8_10_query_ras_error_count,
 	.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -340,4 +365,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
 		.hw_ops = &umc_v8_10_ras_hw_ops,
 	},
 	.err_cnt_init = umc_v8_10_err_cnt_init,
+	.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
 };
commit 6dddc1eb9632b0eb6098d1dc849e8acb2408c1b6
Author: Candice Li <candice.li at amd.com>
Date:   Mon Sep 26 16:18:56 2022 +0800

    drm/amdgpu: Update umc v8_10_0 headers
    
    Add GeccCtrl offset and mask to umc v8_10_0 headers.
    
    Signed-off-by: Candice Li <candice.li at amd.com>
    Reviewed-by: Tao Zhou <tao.zhou1 at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
index b798cf5a2c39..38adde3cae5a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
@@ -29,5 +29,7 @@
 #define regMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX  2
 #define regMCA_UMC_UMC0_MCUMC_ADDRT0             0x03c4
 #define regMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX    2
+#define regUMCCH0_0_GeccCtrl                     0x0053
+#define regUMCCH0_0_GeccCtrl_BASE_IDX            2
 
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
index bd99b431247f..4dbec524f943 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
@@ -90,5 +90,8 @@
 #define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT        0x0
 #define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT         0x38
 #define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK          0x00FFFFFFFFFFFFFFL
+//UMCCH0_0_GeccCtrl
+#define UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT                0xd
+#define UMCCH0_0_GeccCtrl__UCFatalEn_MASK                  0x00002000L
 
 #endif
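As a side note for readers not used to the asic_reg headers: the pair of defines
above describes a single-bit field at bit 13. A minimal standalone sketch of how
such a shift/mask pair is consumed follows; the real driver goes through its
REG_GET_FIELD() macro, and the helper name below is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the header hunk above. */
#define UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT  0xd
#define UMCCH0_0_GeccCtrl__UCFatalEn_MASK    0x00002000L

/* Hypothetical stand-in for REG_GET_FIELD(): mask the field, then shift it down. */
static uint32_t demo_get_ucfatalen(uint32_t gecc_ctrl)
{
	return (gecc_ctrl & UMCCH0_0_GeccCtrl__UCFatalEn_MASK) >>
		UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT;
}

int main(void)
{
	printf("UCFatalEn=%u\n", demo_get_ucfatalen(0x2000)); /* bit 13 set -> 1 */
	printf("UCFatalEn=%u\n", demo_get_ucfatalen(0x0));    /* bit 13 clear -> 0 */
	return 0;
}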
commit 38dbbfa57c08b29ef8cf1d3fb3ad639ae819754e
Author: Tao Zhou <tao.zhou1 at amd.com>
Date:   Thu Sep 29 14:59:11 2022 +0800

    drm/amdgpu: fix coding style issue for mca notifier
    
    Fix some issues found by the checkpatch script.
    
    Signed-off-by: Tao Zhou <tao.zhou1 at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c2f9970e851c..2dad7aa9a03b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2877,9 +2877,9 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
 	err_data.err_addr =
 		kcalloc(adev->umc.max_ras_err_cnt_per_query,
 			sizeof(struct eeprom_table_record), GFP_KERNEL);
-	if(!err_data.err_addr) {
-		dev_warn(adev->dev, "Failed to alloc memory for "
-				"umc error address record in mca notifier!\n");
+	if (!err_data.err_addr) {
+		dev_warn(adev->dev,
+			"Failed to alloc memory for umc error record in mca notifier!\n");
 		return NOTIFY_DONE;
 	}
 
commit fb4d5891cee6d1c14b8d8f1b65c9d061ed3a495c
Author: Tao Zhou <tao.zhou1 at amd.com>
Date:   Tue Sep 27 11:42:45 2022 +0800

    drm/amdgpu: define convert_error_address for umc v8.7
    
    So the code can be simplified.
    
    Signed-off-by: Tao Zhou <tao.zhou1 at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index e2623685cb44..b717fdaa46e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -108,20 +108,35 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
 	}
 }
 
+static void umc_v8_7_convert_error_address(struct amdgpu_device *adev,
+					struct ras_err_data *err_data, uint64_t err_addr,
+					uint32_t ch_inst, uint32_t umc_inst)
+{
+	uint64_t retired_page;
+	uint32_t channel_index;
+
+	channel_index =
+		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+	/* translate umc channel address to soc pa, 3 parts are included */
+	retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+			ADDR_OF_256B_BLOCK(channel_index) |
+			OFFSET_IN_256B_BLOCK(err_addr);
+
+	amdgpu_umc_fill_error_record(err_data, err_addr,
+				retired_page, channel_index, umc_inst);
+}
+
 static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 					struct ras_err_data *err_data,
 					uint32_t ch_inst,
 					uint32_t umc_inst)
 {
-	uint64_t mc_umc_status, err_addr, retired_page;
-	uint32_t channel_index;
+	uint64_t mc_umc_status, err_addr;
 	uint32_t eccinfo_table_idx;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
-	channel_index =
-		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
 	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
 
 	if (mc_umc_status == 0)
@@ -137,13 +152,8 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
+		umc_v8_7_convert_error_address(adev, err_data, err_addr,
+						ch_inst, umc_inst);
 	}
 }
 
@@ -320,14 +330,12 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 					 uint32_t umc_inst)
 {
 	uint32_t lsb, mc_umc_status_addr;
-	uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
-	uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+	uint64_t mc_umc_status, err_addr, mc_umc_addrt0;
 
 	mc_umc_status_addr =
 		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
 	mc_umc_addrt0 =
 		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
-
 	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 
 	if (mc_umc_status == 0)
@@ -349,13 +357,8 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 		err_addr &= ~((0x1ULL << lsb) - 1);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
+		umc_v8_7_convert_error_address(adev, err_data, err_addr,
+								ch_inst, umc_inst);
 	}
 
 	/* clear umc status */
commit 44420ac5f855f5704d8f939926ed145f99e49e55
Author: Tao Zhou <tao.zhou1 at amd.com>
Date:   Tue Sep 27 11:36:46 2022 +0800

    drm/amdgpu: define RAS convert_error_address API
    
    Make the code reusable and remove redundant code.
    
    Signed-off-by: Tao Zhou <tao.zhou1 at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ccebd8e2a2d8..c2f9970e851c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2889,7 +2889,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
 	if (adev->umc.ras &&
 	    adev->umc.ras->convert_ras_error_address)
 		adev->umc.ras->convert_ras_error_address(adev,
-			&err_data, 0, ch_inst, umc_inst, m->addr);
+			&err_data, m->addr, ch_inst, umc_inst);
 
 	if (amdgpu_bad_page_threshold != 0) {
 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 2fb4951a6433..e46439274f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,8 +22,6 @@
 #define __AMDGPU_UMC_H__
 #include "amdgpu_ras.h"
 
-#define UMC_INVALID_ADDR 0x1ULL
-
 /*
  * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
  * is the index of 4KB block
@@ -54,9 +52,8 @@ struct amdgpu_umc_ras {
 	void (*err_cnt_init)(struct amdgpu_device *adev);
 	bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
 	void (*convert_ras_error_address)(struct amdgpu_device *adev,
-						 struct ras_err_data *err_data,
-						 uint32_t umc_reg_offset, uint32_t ch_inst,
-						 uint32_t umc_inst, uint64_t mca_addr);
+				struct ras_err_data *err_data, uint64_t err_addr,
+				uint32_t ch_inst, uint32_t umc_inst);
 	void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
 				      void *ras_error_status);
 	void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 64d760eb92a3..5d5d031c9e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
 	}
 }
 
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+					struct ras_err_data *err_data, uint64_t err_addr,
+					uint32_t ch_inst, uint32_t umc_inst)
+{
+	uint32_t channel_index;
+	uint64_t soc_pa, retired_page, column;
+
+	channel_index =
+		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+	/* translate umc channel address to soc pa, 3 parts are included */
+	soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+			ADDR_OF_256B_BLOCK(channel_index) |
+			OFFSET_IN_256B_BLOCK(err_addr);
+
+	/* The umc channel bits are not original values, they are hashed */
+	SET_CHANNEL_HASH(channel_index, soc_pa);
+
+	/* clear [C4 C3 C2] in soc physical address */
+	soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+	/* loop for all possibilities of [C4 C3 C2] */
+	for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+		retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+		dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+		amdgpu_umc_fill_error_record(err_data, err_addr,
+			retired_page, channel_index, umc_inst);
+
+		/* shift R14 bit */
+		retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+		dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+		amdgpu_umc_fill_error_record(err_data, err_addr,
+			retired_page, channel_index, umc_inst);
+	}
+}
+
 static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 					 struct ras_err_data *err_data,
 					 uint32_t ch_inst,
 					 uint32_t umc_inst)
 {
-	uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
-	uint32_t channel_index;
+	uint64_t mc_umc_status, err_addr;
 	uint32_t eccinfo_table_idx;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
-	channel_index =
-		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
 	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
 
 	if (mc_umc_status == 0)
@@ -216,30 +247,8 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		/* The umc channel bits are not original values, they are hashed */
-		SET_CHANNEL_HASH(channel_index, soc_pa);
-
-		/* clear [C4 C3 C2] in soc physical address */
-		soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
-		/* loop for all possibilities of [C4 C3 C2] */
-		for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-			retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-			amdgpu_umc_fill_error_record(err_data, err_addr,
-				retired_page, channel_index, umc_inst);
-
-			/* shift R14 bit */
-			retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-			amdgpu_umc_fill_error_record(err_data, err_addr,
-				retired_page, channel_index, umc_inst);
-		}
+		umc_v6_7_convert_error_address(adev, err_data, err_addr,
+					ch_inst, umc_inst);
 	}
 }
 
@@ -448,75 +457,40 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
 static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
 					 struct ras_err_data *err_data,
 					 uint32_t umc_reg_offset, uint32_t ch_inst,
-					 uint32_t umc_inst, uint64_t mca_addr)
+					 uint32_t umc_inst)
 {
 	uint32_t mc_umc_status_addr;
-	uint32_t channel_index;
-	uint64_t mc_umc_status = 0, mc_umc_addrt0;
-	uint64_t err_addr, soc_pa, retired_page, column;
+	uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
 
-	if (mca_addr == UMC_INVALID_ADDR) {
-		mc_umc_status_addr =
-			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
-		mc_umc_addrt0 =
-			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+	mc_umc_status_addr =
+		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+	mc_umc_addrt0 =
+		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
 
-		mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 
-		if (mc_umc_status == 0)
-			return;
+	if (mc_umc_status == 0)
+		return;
 
-		if (!err_data->err_addr) {
-			/* clear umc status */
-			WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
-			return;
-		}
+	if (!err_data->err_addr) {
+		/* clear umc status */
+		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+		return;
 	}
 
-	channel_index =
-		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
 	/* calculate error address if ue error is detected */
-	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) ||
-	    mca_addr != UMC_INVALID_ADDR) {
-		if (mca_addr == UMC_INVALID_ADDR) {
-			err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
-			err_addr =
-				REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
-		} else {
-			err_addr = mca_addr;
-		}
+	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
+		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+		err_addr =
+			REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		/* The umc channel bits are not original values, they are hashed */
-		SET_CHANNEL_HASH(channel_index, soc_pa);
-
-		/* clear [C4 C3 C2] in soc physical address */
-		soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
-		/* loop for all possibilities of [C4 C3 C2] */
-		for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-			retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-			amdgpu_umc_fill_error_record(err_data, err_addr,
-				retired_page, channel_index, umc_inst);
-
-			/* shift R14 bit */
-			retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-			amdgpu_umc_fill_error_record(err_data, err_addr,
-				retired_page, channel_index, umc_inst);
-		}
+		umc_v6_7_convert_error_address(adev, err_data, err_addr,
+					ch_inst, umc_inst);
 	}
 
 	/* clear umc status */
-	if (mca_addr == UMC_INVALID_ADDR)
-		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
 }
 
 static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -538,7 +512,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
 		umc_v6_7_query_error_address(adev,
 					     err_data,
 					     umc_reg_offset, ch_inst,
-					     umc_inst, UMC_INVALID_ADDR);
+					     umc_inst);
 	}
 }
 
@@ -579,5 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
 	.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
 	.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
 	.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
-	.convert_ras_error_address = umc_v6_7_query_error_address,
+	.convert_ras_error_address = umc_v6_7_convert_error_address,
 };
commit cdbb816b5bfeb69ad925805d99b2ec312b241f1c
Author: Tao Zhou <tao.zhou1 at amd.com>
Date:   Mon Sep 26 17:01:33 2022 +0800

    drm/amdgpu: remove check for CE in RAS error address query
    
    Only the RAS UE error address is queried currently, so there is no need to
    check the CE status.
    
    Signed-off-by: Tao Zhou <tao.zhou1 at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 939cb203f7ad..f17d297b594b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -327,10 +327,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 		return;
 	}
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
 		/* the lowest lsb bits should be ignored */
@@ -343,10 +342,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 				ADDR_OF_256B_BLOCK(channel_index) |
 				OFFSET_IN_256B_BLOCK(err_addr);
 
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1)
-			amdgpu_umc_fill_error_record(err_data, err_addr,
+		amdgpu_umc_fill_error_record(err_data, err_addr,
 					retired_page, channel_index, umc_inst);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index a0d19b768346..64d760eb92a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -209,10 +209,9 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 	if (!err_data->err_addr)
 		return;
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
@@ -228,22 +227,18 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 		/* clear [C4 C3 C2] in soc physical address */
 		soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
 
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1) {
-			/* loop for all possibilities of [C4 C3 C2] */
-			for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-				retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-
-				/* shift R14 bit */
-				retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-			}
+		/* loop for all possibilities of [C4 C3 C2] */
+		for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+			retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+			amdgpu_umc_fill_error_record(err_data, err_addr,
+				retired_page, channel_index, umc_inst);
+
+			/* shift R14 bit */
+			retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+			amdgpu_umc_fill_error_record(err_data, err_addr,
+				retired_page, channel_index, umc_inst);
 		}
 	}
 }
@@ -481,10 +476,9 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
 	channel_index =
 		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) ||
 	    mca_addr != UMC_INVALID_ADDR) {
 		if (mca_addr == UMC_INVALID_ADDR) {
 			err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -505,23 +499,18 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
 		/* clear [C4 C3 C2] in soc physical address */
 		soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
 
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1 ||
-		    mca_addr != UMC_INVALID_ADDR) {
-			/* loop for all possibilities of [C4 C3 C2] */
-			for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-				retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-
-				/* shift R14 bit */
-				retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-			}
+		/* loop for all possibilities of [C4 C3 C2] */
+		for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+			retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+			amdgpu_umc_fill_error_record(err_data, err_addr,
+				retired_page, channel_index, umc_inst);
+
+			/* shift R14 bit */
+			retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+			amdgpu_umc_fill_error_record(err_data, err_addr,
+				retired_page, channel_index, umc_inst);
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index a8cbda81828d..38f9e29990cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -208,7 +208,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
 {
 	uint64_t mc_umc_status_addr;
 	uint64_t mc_umc_status, err_addr;
-	uint32_t channel_index;
+	uint64_t mc_umc_addrt0, na_err_addr_base;
+	uint64_t na_err_addr, retired_page_addr;
+	uint32_t channel_index, addr_lsb, col = 0;
+	int ret = 0;
 
 	mc_umc_status_addr =
 		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -229,13 +232,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
 					umc_inst * adev->umc.channel_inst_num +
 					ch_inst];
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-		uint32_t addr_lsb;
-		uint64_t mc_umc_addrt0;
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
 		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -243,32 +243,24 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
 
 		/* the lowest lsb bits should be ignored */
 		addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
-
 		err_addr &= ~((0x1ULL << addr_lsb) - 1);
-
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
-			uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
-			uint64_t na_err_addr, retired_page_addr;
-			uint32_t col = 0;
-			int ret = 0;
-
-			/* loop for all possibilities of [C6 C5] in normal address. */
-			for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
-				na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
-				/* Mapping normal error address to retired soc physical address. */
-				ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
-								na_err_addr, &retired_page_addr);
-				if (ret) {
-					dev_err(adev->dev, "Failed to map pa from umc na.\n");
-					break;
-				}
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
-					retired_page_addr);
-				amdgpu_umc_fill_error_record(err_data, na_err_addr,
-						retired_page_addr, channel_index, umc_inst);
+		na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+		/* loop for all possibilities of [C6 C5] in normal address. */
+		for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+			na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+			/* Mapping normal error address to retired soc physical address. */
+			ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+							na_err_addr, &retired_page_addr);
+			if (ret) {
+				dev_err(adev->dev, "Failed to map pa from umc na.\n");
+				break;
 			}
+			dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+				retired_page_addr);
+			amdgpu_umc_fill_error_record(err_data, na_err_addr,
+					retired_page_addr, channel_index, umc_inst);
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index f35253e0eaa6..e2623685cb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -130,10 +130,9 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 	if (!err_data->err_addr)
 		return;
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
@@ -143,10 +142,7 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 				ADDR_OF_256B_BLOCK(channel_index) |
 				OFFSET_IN_256B_BLOCK(err_addr);
 
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1)
-			amdgpu_umc_fill_error_record(err_data, err_addr,
+		amdgpu_umc_fill_error_record(err_data, err_addr,
 					retired_page, channel_index, umc_inst);
 	}
 }
@@ -343,10 +339,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 		return;
 	}
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
 		/* the lowest lsb bits should be ignored */
@@ -359,10 +354,7 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 				ADDR_OF_256B_BLOCK(channel_index) |
 				OFFSET_IN_256B_BLOCK(err_addr);
 
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1)
-			amdgpu_umc_fill_error_record(err_data, err_addr,
+		amdgpu_umc_fill_error_record(err_data, err_addr,
 					retired_page, channel_index, umc_inst);
 	}
 
commit 6c482c62a635aa4f534d2439fbf8afa37452b986
Author: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Date:   Wed Oct 5 14:11:59 2022 +0200

    drm/i915: Fix display problems after resume
    
    Commit 39a2bd34c933 ("drm/i915: Use the vma resource as argument for gtt
    binding / unbinding") introduced a regression: because the vma resource
    now tracks the binding state, dpt ptes were not correctly repopulated.
    Fix this by clearing the vma resource state before repopulating; the
    state will subsequently be restored by the bind_vma operation.
    
    Fixes: 39a2bd34c933 ("drm/i915: Use the vma resource as argument for gtt binding / unbinding")
    Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20220912121957.31310-1-thomas.hellstrom@linux.intel.com
    Cc: Matthew Auld <matthew.auld at intel.com>
    Cc: intel-gfx at lists.freedesktop.org
    Cc: <stable at vger.kernel.org> # v5.18+
    Reported-and-tested-by: Kevin Boulain <kevinboulain at gmail.com>
    Tested-by: David de Sousa <davidesousa at gmail.com>
    Reviewed-by: Matthew Auld <matthew.auld at intel.com>
    Reviewed-by: Andrzej Hajda <andrzej.hajda at intel.com>
    Signed-off-by: Matthew Auld <matthew.auld at intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221005121159.340245-1-thomas.hellstrom@linux.intel.com
    (cherry picked from commit bc2472538c0d1cce334ffc9e97df0614cd2b1469)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
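
As a rough, self-contained illustration of the fix described above (the structures
and names here are invented stand-ins, not the real i915 code): once a vma
resource caches an "already bound" state, a later bind becomes a no-op, so a
resume path that knows the PTEs were lost has to clear that cached state before
rebinding.

#include <stdbool.h>
#include <stdio.h>

struct vma_resource {
        unsigned int bound_flags;       /* non-zero: treated as already bound */
};

struct vma {
        struct vma_resource res;
        bool ptes_programmed;           /* stands in for the real GGTT/DPT PTEs */
};

static void bind_vma(struct vma *vma)
{
        if (vma->res.bound_flags)       /* binding believed to be present */
                return;
        vma->ptes_programmed = true;    /* "write" the PTEs */
        vma->res.bound_flags = 1;
}

static void resume_vm(struct vma *vma, bool retained_ptes)
{
        if (!retained_ptes) {
                vma->ptes_programmed = false;   /* hardware lost the mapping */
                vma->res.bound_flags = 0;       /* without this, bind_vma() is a no-op */
                bind_vma(vma);
        }
}

int main(void)
{
        struct vma vma = { .res = { .bound_flags = 1 }, .ptes_programmed = true };

        resume_vm(&vma, false);
        printf("ptes repopulated after resume: %s\n",
               vma.ptes_programmed ? "yes" : "no");
        return 0;
}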

diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 30cf5c3369d9..2049a00417af 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1275,10 +1275,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
 			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
 
 		GEM_BUG_ON(!was_bound);
-		if (!retained_ptes)
+		if (!retained_ptes) {
+			/*
+			 * Clear the bound flags of the vma resource to allow
+			 * ptes to be repopulated.
+			 */
+			vma->resource->bound_flags = 0;
 			vma->ops->bind_vma(vm, NULL, vma->resource,
 					   obj ? obj->cache_level : 0,
 					   was_bound);
+		}
 		if (obj) { /* only used during resume => exclusive access */
 			write_domain_objs |= fetch_and_zero(&obj->write_domain);
 			obj->read_domains |= I915_GEM_DOMAIN_GTT;
commit faf4d8e07f5b67bece91723ad3e8b3f88a3dbf23
Author: Guenter Roeck <linux at roeck-us.net>
Date:   Sun Oct 9 23:05:12 2022 -0700

    drm/amd/display: fix array-bounds error in dc_stream_remove_writeback() [take 2]
    
    Commit 5d8c3e836fc2 ("drm/amd/display: fix array-bounds error in
    dc_stream_remove_writeback()") tried to fix an array bounds error seen
    with gcc 12.0. Unfortunately, that results in another array bounds error,
    seen with older versions of gcc.
    
    Building csky:allmodconfig ... failed
    --------------
    Error log:
    drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_stream.c:
            In function 'dc_stream_remove_writeback':
    drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_stream.c:527:83:
            error: array subscript 1 is above array bounds of 'struct dc_writeback_info[1]' [-Werror=array-bounds]
      527 |                                 stream->writeback_info[j] = stream->writeback_info[i];
          |                                                             ~~~~~~~~~~~~~~~~~~~~~~^~~
    In file included from drivers/gpu/drm/amd/amdgpu/../display/dc/dc.h:1269,
                     from drivers/gpu/drm/amd/amdgpu/../display/dc/inc/core_types.h:29,
                     from drivers/gpu/drm/amd/amdgpu/../display/dc/basics/dc_common.h:29,
                     from drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_stream.c:27:
    drivers/gpu/drm/amd/amdgpu/../display/dc/dc_stream.h:241:34: note: while referencing 'writeback_info'
      241 |         struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
    
    We could check both i and j for overflow to fix the problem. That would,
    however, not make much sense, since it is known and provable that j <= i.
    Also, the check introduced with commit 5d8c3e836fc2 does not really add
    value since it checks if j < MAX_DWB_PIPES. Since it is known that j <= i,
    it would make more sense to check if i < MAX_DWB_PIPES. Unfortunately, that
    does not help to solve the problem observed here: gcc still complains.
    
    To solve the problem, replace the subsequent check for 'i != j' with
    'j < i'. This is identical to the original check since we know that j <= i,
    and it makes all versions of gcc happy. Drop the check introduced with
    commit 5d8c3e836fc2 since it is not really useful and does not solve the
    problem.
    
    Cc: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Cc: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Fixes: 5d8c3e836fc2 ("drm/amd/display: fix array-bounds error in dc_stream_remove_writeback()")
    Signed-off-by: Guenter Roeck <linux at roeck-us.net>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
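
A small standalone sketch of the compaction loop discussed above (array and
field names are made up): because j is only incremented when an element is
kept, j <= i holds on every iteration, so the copy into slot j is always in
bounds and 'j < i' is equivalent to the old 'i != j' test.

#include <stdbool.h>
#include <stdio.h>

#define MAX_ENTRIES 4

struct wb_info {
        bool enabled;
        int id;
};

/* Keep only enabled entries, in place; returns the new count. */
static int trim_enabled(struct wb_info *info, int num)
{
        int i, j = 0;

        for (i = 0; i < num; i++) {
                if (info[i].enabled) {
                        if (j < i)              /* j <= i always holds here */
                                info[j] = info[i];
                        j++;
                }
        }
        return j;
}

int main(void)
{
        struct wb_info info[MAX_ENTRIES] = {
                { false, 0 }, { true, 1 }, { false, 2 }, { true, 3 },
        };
        int n = trim_enabled(info, MAX_ENTRIES);

        for (int i = 0; i < n; i++)
                printf("kept id %d\n", info[i].id); /* prints 1 then 3 */
        return 0;
}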

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 9998f58c14b9..38d71b5c1f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -524,9 +524,9 @@ bool dc_stream_remove_writeback(struct dc *dc,
 	}
 
 	/* remove writeback info for disabled writeback pipes from stream */
-	for (i = 0, j = 0; i < stream->num_wb_info && j < MAX_DWB_PIPES; i++) {
+	for (i = 0, j = 0; i < stream->num_wb_info; i++) {
 		if (stream->writeback_info[i].wb_enabled) {
-			if (i != j)
+			if (j < i)
 				/* trim the array */
 				stream->writeback_info[j] = stream->writeback_info[i];
 			j++;
commit d2bd0831b51d1123fc86c019db3452d6a1ce5029
Author: Alexey Kodanev <aleksei.kodanev at bell-sw.com>
Date:   Tue Oct 4 11:14:02 2022 +0300

    drm/amd/pm: smu7_hwmgr: fix potential off-by-one overflow in 'performance_levels'
    
    Since 'hardwareActivityPerformanceLevels' is set to the size of the
    'performance_levels' array in smu7_hwmgr_backend_init(), using the
    '<=' assertion to check for the next index value is incorrect.
    Replace it with '<'.
    
    Detected using the static analysis tool - Svace.
    Fixes: 599a7e9fe1b6 ("drm/amd/powerplay: implement smu7 hwmgr to manager asics with smu ip version 7.")
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alexey Kodanev <aleksei.kodanev at bell-sw.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
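
A minimal, self-contained example of the off-by-one pattern being fixed (the
names are invented): if 'count' is the index about to be written and CAPACITY
is the array size, a 'count <= CAPACITY' check still lets a write land one
element past the end; 'count < CAPACITY' is the correct guard.

#include <stdio.h>

#define CAPACITY 8

struct state {
        int levels[CAPACITY];
        unsigned int count;     /* next index to be filled */
};

static int add_level(struct state *s, int value)
{
        /* Wrong: "count <= CAPACITY" still passes when count == CAPACITY,
         * and the write below then lands one element past the end. */
        if (!(s->count < CAPACITY))     /* correct guard */
                return -1;

        s->levels[s->count++] = value;
        return 0;
}

int main(void)
{
        struct state s = { .count = 0 };

        for (int i = 0; i < 10; i++)
                if (add_level(&s, i))
                        printf("rejected level %d (array full)\n", i);
        return 0;
}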

diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e4fcbf8a7eb5..7ef7e81525a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3603,7 +3603,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
 			return -EINVAL);
 
 	PP_ASSERT_WITH_CODE(
-			(smu7_power_state->performance_level_count <=
+			(smu7_power_state->performance_level_count <
 					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
 			"Performance levels exceeds Driver limit!",
 			return -EINVAL);
commit 2cc4a5914ce952d6fc83b0f8089a23095ad4f677
Author: Alexey Kodanev <aleksei.kodanev at bell-sw.com>
Date:   Tue Oct 4 11:14:01 2022 +0300

    drm/amd/pm: vega10_hwmgr: fix potential off-by-one overflow in 'performance_levels'
    
    Since 'hardwareActivityPerformanceLevels' is set to the size of the
    'performance_levels' array in vega10_hwmgr_backend_init(), using the
    '<=' assertion to check for the next index value is incorrect.
    Replace it with '<'.
    
    Detected using the static analysis tool - Svace.
    Fixes: f83a9991648b ("drm/amd/powerplay: add Vega10 powerplay support (v5)")
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alexey Kodanev <aleksei.kodanev at bell-sw.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99bfe5efe171..c8c9fb827bda 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3155,7 +3155,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 			return -1);
 
 	PP_ASSERT_WITH_CODE(
-			(vega10_ps->performance_level_count <=
+			(vega10_ps->performance_level_count <
 					hwmgr->platform_descriptor.
 					hardwareActivityPerformanceLevels),
 			"Performance levels exceeds Driver limit!",
commit a98cec220aa4b2502704aa0196da1bdc9eb455b4
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Thu Oct 6 15:53:10 2022 -0400

    drm/amdgpu: fix SDMA suspend/resume on SR-IOV
    
    Update all SDMA versions that support SR-IOV to properly
    tear down the ttm buffer functions on suspend.
    
    Tested-by: Bokun Zhang <Bokun.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
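
A rough model of what tearing down the TTM buffer functions on suspend amounts
to (invented structures, not the amdgpu API): even on a VF that cannot stop the
shared engine, suspend detaches the ring that TTM uses for buffer moves so
nothing submits to it while it is down.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_INSTANCES 2

struct ring {
        const char *name;
};

struct device {
        struct ring sdma[NUM_INSTANCES];
        struct ring *buffer_funcs_ring; /* ring used for buffer moves */
        bool is_vf;
};

/* If any SDMA ring is the one used for buffer moves, detach it. */
static void unset_buffer_funcs(struct device *dev)
{
        for (int i = 0; i < NUM_INSTANCES; i++) {
                if (dev->buffer_funcs_ring == &dev->sdma[i]) {
                        dev->buffer_funcs_ring = NULL;
                        break;
                }
        }
}

static void sdma_hw_fini(struct device *dev)
{
        if (dev->is_vf) {
                /* A VF cannot stop the shared engine, but it must still
                 * stop handing buffer moves to it. */
                unset_buffer_funcs(dev);
                return;
        }
        unset_buffer_funcs(dev);
        /* ...a bare-metal path would also halt the engine here... */
}

int main(void)
{
        struct device dev = { .sdma = { { "sdma0" }, { "sdma1" } }, .is_vf = true };

        dev.buffer_funcs_ring = &dev.sdma[0];
        sdma_hw_fini(&dev);
        printf("buffer funcs ring after fini: %s\n",
               dev.buffer_funcs_ring ? dev.buffer_funcs_ring->name : "none");
        return 0;
}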

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7b4195f18a7c..298fa11702e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1940,8 +1940,11 @@ static int sdma_v4_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
 		return 0;
+	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 783048e1b0ce..d4d9f196db83 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1456,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
 		return 0;
+	}
 
 	sdma_v5_0_ctx_switch_enable(adev, false);
 	sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index c2ee53c2dd1b..809eca54fc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1349,19 +1349,15 @@ static int sdma_v5_2_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	/*
-	 * Under SRIOV, the VF cannot single-mindedly stop SDMA engine
-	 * However, we still need to clean up the DRM entity
-	 * Therefore, we will re-enable SDMA afterwards.
-	 */
-	sdma_v5_2_ctx_switch_enable(adev, false);
-	sdma_v5_2_enable(adev, false);
-
 	if (amdgpu_sriov_vf(adev)) {
-		sdma_v5_2_enable(adev, true);
-		sdma_v5_2_ctx_switch_enable(adev, true);
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
+		return 0;
 	}
 
+	sdma_v5_2_ctx_switch_enable(adev, false);
+	sdma_v5_2_enable(adev, false);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index a6483483404e..da3beb0bf2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1311,8 +1311,11 @@ static int sdma_v6_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
 		return 0;
+	}
 
 	sdma_v6_0_ctx_switch_enable(adev, false);
 	sdma_v6_0_enable(adev, false);
commit 571c053658926df3321633b7133f574d3e656c81
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Thu Oct 6 15:31:40 2022 -0400

    drm/amdgpu: switch sdma buffer function tear down to a helper
    
    Switch all of the SDMA implementations to use the helper to
    tear down the ttm buffer manager.
    
    Tested-by: Bokun Zhang <Bokun.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 43cf8632cc1a..ea5278f094c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -285,3 +285,24 @@ out:
 	}
 	return err;
 }
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *sdma;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue) {
+			sdma = &adev->sdma.instance[i].page;
+			if (adev->mman.buffer_funcs_ring == sdma) {
+				amdgpu_ttm_set_buffer_funcs_status(adev, false);
+				break;
+			}
+		}
+		sdma = &adev->sdma.instance[i].ring;
+		if (adev->mman.buffer_funcs_ring == sdma) {
+			amdgpu_ttm_set_buffer_funcs_status(adev, false);
+			break;
+		}
+	}
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d2d88279fefb..7d99205c2e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
         char *fw_name, u32 instance, bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
         bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5647f13b98d4..cbca9866645c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6bdffdc1c0b9..c52d246a1d96 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2584fa3cb13e..486d9b5c1b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7241a9fb0121..7b4195f18a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 rb_cntl, ib_cntl;
-	int i, unset = 0;
+	int i;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sdma[i] = &adev->sdma.instance[i].ring;
-
-		if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = 1;
-		}
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 rb_cntl, ib_cntl;
 	int i;
-	bool unset = false;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sdma[i] = &adev->sdma.instance[i].page;
-
-		if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-			(!unset)) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = true;
-		}
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
 					RB_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index c05c3eebde4c..783048e1b0ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 3eaf1a573e73..c2ee53c2dd1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
-	struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
-	struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1) ||
-	    (adev->mman.buffer_funcs_ring == sdma2) ||
-	    (adev->mman.buffer_funcs_ring == sdma3))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 0150f66a5ae6..a6483483404e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
 		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
 	}
-
-	sdma0->sched.ready = false;
-	sdma1->sched.ready = false;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f675111ace20..4d5e718540aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
 	u32 rb_cntl;
 	unsigned i;
 
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		/* dma0 */
 		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
 		rb_cntl &= ~DMA_RB_ENABLE;
 		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
 	}
 }
 
commit e5da651985be20616a9e0662032e0ea2ee4dd468
Author: Bokun Zhang <Bokun.Zhang at amd.com>
Date:   Fri Oct 7 02:08:38 2022 +0800

    drm/amdgpu: Fix SDMA engine resume issue under SRIOV
    
    - Under SRIOV, the SDMA engine is shared between VFs. Therefore,
      we will not stop SDMA during hw_fini. This is not an issue
      with normal driver loading and unloading.
    
    - However, when we put the SDMA engine into suspend and resume it,
      the issue starts to show up. Something could attempt to use
      that SDMA engine to clear or move memory before the engine is
      initialized since the DRM entity is still there.
    
    - Therefore, we will call sdma_v5_2_enable(false) during hw_fini,
      and if we are under SRIOV, we will call sdma_v5_2_enable(true)
      afterwards to allow other VFs to use SDMA. This way, the DRM
      entity of the SDMA engine is emptied and it follows the normal
      resume code path.
    
    Tested-by: Bokun Zhang <Bokun.Zhang at amd.com>
    Signed-off-by: Bokun Zhang <Bokun.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f136fec7b4f4..3eaf1a573e73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1357,12 +1357,19 @@ static int sdma_v5_2_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
+	/*
+	 * Under SRIOV, the VF cannot single-mindedly stop SDMA engine
+	 * However, we still need to clean up the DRM entity
+	 * Therefore, we will re-enable SDMA afterwards.
+	 */
 	sdma_v5_2_ctx_switch_enable(adev, false);
 	sdma_v5_2_enable(adev, false);
 
+	if (amdgpu_sriov_vf(adev)) {
+		sdma_v5_2_enable(adev, true);
+		sdma_v5_2_ctx_switch_enable(adev, true);
+	}
+
 	return 0;
 }
 
commit 9799702360d51a714e888fef4ab5fb9123dfb41f
Author: Alvin Lee <Alvin.Lee2 at amd.com>
Date:   Wed Jun 29 12:35:12 2022 -0400

    drm/amd/display: Fix watermark calculation
    
    Watermark calculation was incorrect due to missing brackets.
    
    Fixes: 85f4bc0c333c ("drm/amd/display: Add SubVP required code")
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 6.0
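
A standalone illustration of why the bracket placement can matter (assuming
32-bit operands here, which is an assumption; the real field widths are not
visible in this hunk): multiplying a nanosecond value by a kHz clock before
dividing can overflow the intermediate product, while reducing the clock to
MHz first keeps it in range.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative values only: a p-state change time in ns and a
         * reference clock in kHz.  Both fit in 32 bits on their own. */
        uint32_t pstate_change_ns = 200000;     /* 200 us */
        uint32_t ref_clock_khz    = 48000;      /* 48 MHz */

        /* Multiply first: 200000 * 48000 = 9.6e9 wraps around in
         * uint32_t, so the final result is wrong. */
        uint32_t no_brackets = pstate_change_ns * ref_clock_khz / 1000 / 1000;

        /* Reduce the clock to MHz first: the product stays well within
         * 32 bits and the result is the expected cycle count. */
        uint32_t with_brackets = pstate_change_ns * (ref_clock_khz / 1000) / 1000;

        printf("without brackets: %u\n", no_brackets);   /* wrong */
        printf("with brackets:    %u\n", with_brackets); /* 9600 */
        return 0;
}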

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index bbde635c56fc..0541e87e4f38 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -781,7 +781,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
 		// Store the original watermark value for this SubVP config so we can lower it when the
 		// MCLK switch starts
 		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
-				dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 / 1000;
+				(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
 
 		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
 	}
commit c8588697aa4ec1f3b7fc09277cf2a5a662d40834
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 27 18:30:08 2022 -0400

    drm/amd/display: Drop unnecessary OTG lock check
    
    The OTG_MASTER_UPDATE_LOCK_SEL is used for GSL and OTGs in the same
    group for selecting the OTG_MASTER_UPDATE_LOCK from the same OTG. At
    some point, a check was added to see whether the OTG is running, which
    is not necessary, so this commit drops that check.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index dca8a1446120..33d780218790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -646,13 +646,6 @@ uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
 void optc1_lock(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	uint32_t regval = 0;
-
-	regval = REG_READ(OTG_CONTROL);
-
-	/* otg is not running, do not need to be locked */
-	if ((regval & 0x1) == 0x0)
-		return;
 
 	REG_SET(OTG_GLOBAL_CONTROL0, 0,
 			OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
@@ -660,12 +653,10 @@ void optc1_lock(struct timing_generator *optc)
 			OTG_MASTER_UPDATE_LOCK, 1);
 
 	/* Should be fast, status does not update on maximus */
-	if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) {
-
+	if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
 		REG_WAIT(OTG_MASTER_UPDATE_LOCK,
 				UPDATE_LOCK_STATUS, 1,
 				1, 10);
-	}
 }
 
 void optc1_unlock(struct timing_generator *optc)
commit 15e8b368981e1e8420f08b35bb12b794b200f4a0
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Fri Mar 12 11:58:57 2021 -0500

    drm/amd/display: Use set_vtotal_min_max to configure OTG VTOTAL
    
    In multiple parts of the DCN code, we write directly to the
    OTG_V_TOTAL_* registers in some OPTC functions. Let's avoid that by
    using the set_vtotal_min_max hook instead.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
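
A compact sketch of the hook pattern being adopted (invented types and register
stubs; the real OPTC implementations differ): callers go through a per-ASIC
function pointer rather than writing OTG_V_TOTAL_MIN/MAX directly, so each
generation can supply or share an implementation.

#include <stdint.h>
#include <stdio.h>

struct optc;

struct timing_generator_funcs {
        void (*set_vtotal_min_max)(struct optc *optc,
                                   uint32_t vtotal_min, uint32_t vtotal_max);
};

struct optc {
        const struct timing_generator_funcs *funcs;
        int inst;
};

/* Stand-in for the real register write path. */
static void reg_set(const char *reg, uint32_t val)
{
        printf("REG_SET(%s, %u)\n", reg, val);
}

/* One per-ASIC implementation of the hook. */
static void optc1_set_vtotal_min_max(struct optc *optc,
                                     uint32_t vtotal_min, uint32_t vtotal_max)
{
        reg_set("OTG_V_TOTAL_MIN", vtotal_min);
        reg_set("OTG_V_TOTAL_MAX", vtotal_max);
}

static const struct timing_generator_funcs tg_funcs = {
        .set_vtotal_min_max = optc1_set_vtotal_min_max,
};

/* Callers no longer touch the registers directly. */
static void program_timing(struct optc *optc, uint32_t v_total)
{
        optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
}

int main(void)
{
        struct optc optc = { .funcs = &tg_funcs, .inst = 0 };

        program_timing(&optc, 1125);
        return 0;
}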

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 143a900d4d3d..dca8a1446120 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -207,10 +207,7 @@ void optc1_program_timing(
 	/* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
 	 * OTG_V_TOTAL_MIN are equal to V_TOTAL.
 	 */
-	REG_SET(OTG_V_TOTAL_MAX, 0,
-		OTG_V_TOTAL_MAX, v_total);
-	REG_SET(OTG_V_TOTAL_MIN, 0,
-		OTG_V_TOTAL_MIN, v_total);
+	optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
 
 	/* v_sync_start = 0, v_sync_end = v_sync_width */
 	v_sync_end = patched_crtc_timing.v_sync_width;
@@ -931,11 +928,7 @@ void optc1_set_drr(
 
 		}
 
-		REG_SET(OTG_V_TOTAL_MAX, 0,
-			OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
-
-		REG_SET(OTG_V_TOTAL_MIN, 0,
-			OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
 
 		REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
 				OTG_V_TOTAL_MIN_SEL, 1,
@@ -954,11 +947,7 @@ void optc1_set_drr(
 				OTG_V_TOTAL_MAX_SEL, 0,
 				OTG_FORCE_LOCK_ON_EVENT, 0);
 
-		REG_SET(OTG_V_TOTAL_MIN, 0,
-			OTG_V_TOTAL_MIN, 0);
-
-		REG_SET(OTG_V_TOTAL_MAX, 0,
-			OTG_V_TOTAL_MAX, 0);
+		optc->funcs->set_vtotal_min_max(optc, 0, 0);
 	}
 }
 
@@ -1577,6 +1566,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
 		.enable_optc_clock = optc1_enable_optc_clock,
 		.set_drr = optc1_set_drr,
 		.get_last_used_drr_vtotal = NULL,
+		.set_vtotal_min_max = optc1_set_vtotal_min_max,
 		.set_static_screen_control = optc1_set_static_screen_control,
 		.set_test_pattern = optc1_set_test_pattern,
 		.program_stereo = optc1_program_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 02459a64ee21..892d3c4d01a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -325,6 +325,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
 		.enable_optc_clock = optc1_enable_optc_clock,
 		.set_drr = optc1_set_drr,
 		.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+		.set_vtotal_min_max = optc3_set_vtotal_min_max,
 		.set_static_screen_control = optc1_set_static_screen_control,
 		.program_stereo = optc1_program_stereo,
 		.is_stereo_left_eye = optc1_is_stereo_left_eye,
@@ -365,4 +366,3 @@ void dcn30_timing_generator_init(struct optc *optc1)
 	optc1->min_h_sync_width = 4;
 	optc1->min_v_sync_width = 1;
 }
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index d873def1a8f9..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -201,7 +201,6 @@ void optc31_set_drr(
 
 		// Setup manual flow control for EOF via TRIG_A
 		optc->funcs->setup_manual_trigger(optc);
-
 	} else {
 		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
 				OTG_SET_V_TOTAL_MIN_MASK, 0,
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 1d36f0fceb3e..7a8f61517424 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -2941,11 +2941,7 @@ struct dmub_rb_cmd_get_visual_confirm_color {
 struct dmub_optc_state {
 	uint32_t v_total_max;
 	uint32_t v_total_min;
-	uint32_t v_total_mid;
-	uint32_t v_total_mid_frame_num;
 	uint32_t tg_inst;
-	uint32_t enable_manual_trigger;
-	uint32_t clear_force_vsync;
 };
 
 struct dmub_rb_cmd_drr_update {
commit 1298d9ab848653fc35431581d6e36662c7b6935a
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 27 17:59:05 2022 -0400

    drm/amd/display: Add a missing hook to DCN20
    
    The struct timing_generator_funcs provides a hook for setting up the
    maximum vertical dimension of the display for the OTG, as supported by
    the panel. DCN10 has a standard function named optc1_set_vtotal_min_max
    which all ASICs can use for this hook. Since it was not set for DCN20,
    this commit initializes set_vtotal_min_max with the DCN10 function.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 0340fdd3f5fb..a08c335b7383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -529,6 +529,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
 		.enable_optc_clock = optc1_enable_optc_clock,
 		.set_drr = optc1_set_drr,
 		.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+		.set_vtotal_min_max = optc1_set_vtotal_min_max,
 		.set_static_screen_control = optc1_set_static_screen_control,
 		.program_stereo = optc1_program_stereo,
 		.is_stereo_left_eye = optc1_is_stereo_left_eye,
commit ba30b223c93ec5af63993b6397cd7316e5acb6c1
Author: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
Date:   Tue Oct 4 13:30:51 2022 -0400

    drm/amd/display: always allow pstate change when no dpps are active on dcn315
    
    Prevents certain configs from blocking s0i3 when streams aren't
    completely removed.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Charlene Liu <Charlene.Liu at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 87bfc42bdaaf..7dd0845d1bd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
 		int pipe_cnt,
 		int vlevel)
 {
-	int i, pipe_idx;
+	int i, pipe_idx, active_dpp_count = 0;
 	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
 
 	dc_assert_fp_enabled();
@@ -528,6 +528,9 @@ void dcn31_calculate_wm_and_dlg_fp(
 		if (!context->res_ctx.pipe_ctx[i].stream)
 			continue;
 
+		if (context->res_ctx.pipe_ctx[i].plane_state)
+			active_dpp_count++;
+
 		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
 		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 
@@ -544,9 +547,9 @@ void dcn31_calculate_wm_and_dlg_fp(
 	}
 
 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-	/* For 31x apu pstate change is only supported if possible in vactive */
+	/* For 31x apu pstate change is only supported if possible in vactive or if there are no active dpps */
 	context->bw_ctx.bw.dcn.clk.p_state_change_support =
-			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
+			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
 }
 
 void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
commit 6f4f8ff567c48823f8279206e236643e8e8f377e
Author: Meenakshikumar Somasundaram <meenakshikumar.somasundaram at amd.com>
Date:   Thu Sep 29 23:55:41 2022 -0400

    drm/amd/display: Display does not light up after S4 resume
    
    [Why]
    Dpia hpd interrupt processing is disabled when entering S4/S0i3 and is
    reenabled after detection completes during resume, because keeping hpd
    interrupts enabled during detection leads to multiple detections for
    the same hpd transition. There is an S4 case where the dpia hpd
    interrupt is missed while the driver is transitioning from hpd
    interrupt processing disabled to enabled, and the display does not
    light up.
    
    [How]
    - Added dmub inbox command DMUB_CMD__DPIA_HPD_INT_ENABLE to explicitly
    control whether dmub issues the dpia hpd interrupt. If the dpia hpd
    interrupt is disabled, dmub keeps the hpd pending and posts it once the
    driver reenables the dpia hpd interrupt or queries it with
    DMUB_CMD__QUERY_HPD_STATE.
    - Added dmub boot option dpia_hpd_int_enable_supported to notify dmub
    about whether DMUB_CMD__DPIA_HPD_INT_ENABLE command would be used.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Mustapha Ghaddar <Mustapha.Ghaddar at amd.com>
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Meenakshikumar Somasundaram <meenakshikumar.somasundaram at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
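
A self-contained model of the behaviour described under [How] (structure and
function names are made up, not the DMUB interface): while the interrupt is
disabled an HPD event is only latched, and it is posted as soon as the driver
re-enables the interrupt.

#include <stdbool.h>
#include <stdio.h>

struct dpia_hpd_state {
        bool int_enabled;       /* driver-controlled via the enable command */
        bool pending;           /* HPD seen while interrupts were disabled */
};

static void deliver_hpd_interrupt(void)
{
        printf("hpd interrupt posted to driver\n");
}

/* Firmware side: an HPD transition arrives. */
static void on_hpd_transition(struct dpia_hpd_state *s)
{
        if (s->int_enabled)
                deliver_hpd_interrupt();
        else
                s->pending = true;      /* keep it until re-enabled */
}

/* Driver side: the enable/disable command; a pending HPD is posted
 * as soon as interrupts are turned back on. */
static void set_hpd_int_enable(struct dpia_hpd_state *s, bool enable)
{
        s->int_enabled = enable;
        if (enable && s->pending) {
                s->pending = false;
                deliver_hpd_interrupt();
        }
}

int main(void)
{
        struct dpia_hpd_state s = { .int_enabled = true };

        set_hpd_int_enable(&s, false);  /* entering S4/S0i3 */
        on_hpd_transition(&s);          /* hotplug while suspended: latched */
        set_hpd_int_enable(&s, true);   /* detection done: HPD now posted */
        return 0;
}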

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index b5058a2ce7e8..660316a536f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -4656,6 +4656,37 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
 	return DC_OK;
 }
 
+/**
+ *****************************************************************************
+ *  Function: dc_process_dmub_dpia_hpd_int_enable
+ *
+ *  @brief
+ *		Submits dpia hpd int enable command to dmub via inbox message
+ *
+ *  @param
+ *		[in] dc: dc structure
+ *		[in] hpd_int_enable: 1 for hpd int enable, 0 to disable
+ *
+ *	@return
+ *		None
+ *****************************************************************************
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+				uint32_t hpd_int_enable)
+{
+	union dmub_rb_cmd cmd = {0};
+	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+	dc_dmub_srv_cmd_execute(dmub_srv);
+	dc_dmub_srv_wait_idle(dmub_srv);
+
+	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
 /**
  * dc_disable_accelerated_mode - disable accelerated mode
  * @dc: dc structure
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index b0afcff94591..5d0103e20412 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1612,6 +1612,9 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
 				uint8_t mst_alloc_slots,
 				uint8_t *mst_slots_in_use);
 
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+				uint32_t hpd_int_enable);
+
 /*******************************************************************************
  * DSC Interfaces
  ******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index f34c45b19fcb..eb5b7eb292ef 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -248,6 +248,7 @@ struct dmub_srv_hw_params {
 	bool disable_dpia;
 	bool usb4_cm_version;
 	bool fw_in_system_memory;
+	bool dpia_hpd_int_enable_supported;
 };
 
 /**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 834707dfc189..1d36f0fceb3e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -400,8 +400,9 @@ union dmub_fw_boot_options {
 		uint32_t diag_env: 1; /* 1 if diagnostic environment */
 		uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
 		uint32_t usb4_cm_version: 1; /**< 1 CM support */
+		uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
 
-		uint32_t reserved : 17; /**< reserved */
+		uint32_t reserved : 16; /**< reserved */
 	} bits; /**< boot bits */
 	uint32_t all; /**< 32-bit access to bits */
 };
@@ -728,6 +729,12 @@ enum dmub_cmd_type {
 	/**
 	 * Command type used for all VBIOS interface commands.
 	 */
+
+	/**
+	 * Command type used to set DPIA HPD interrupt state
+	 */
+	DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+
 	DMUB_CMD__VBIOS = 128,
 };
 
@@ -1255,6 +1262,14 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
 	struct dmub_cmd_mst_alloc_slots_control_data mst_slots_control; /* mst slots control */
 };
 
+/**
+ * DMUB command structure for DPIA HPD int enable control.
+ */
+struct dmub_rb_cmd_dpia_hpd_int_enable {
+	struct dmub_cmd_header header; /* header */
+	uint32_t enable; /* dpia hpd interrupt enable */
+};
+
 /**
  * struct dmub_rb_cmd_dpphy_init - DPPHY init.
  */
@@ -3336,6 +3351,10 @@ union dmub_rb_cmd {
 	 * Definition of a DMUB_CMD__QUERY_HPD_STATE command.
 	 */
 	struct dmub_rb_cmd_query_hpd_state query_hpd;
+	/**
+	 * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
+	 */
+	struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
 };
 
 /**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c7bd7e216710..c90b9ee42e12 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -350,6 +350,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
 	boot_options.bits.dpia_supported = params->dpia_supported;
 	boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
 	boot_options.bits.usb4_cm_version = params->usb4_cm_version;
+	boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
 	boot_options.bits.power_optimization = params->power_optimization;
 
 	boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
commit b73353f7f3d434e90da9f0e127bba1fe26cb1287
Author: Max Tseng <Max.Tseng at amd.com>
Date:   Sun Oct 2 20:45:37 2022 +0800

    drm/amd/display: Use the same cursor info across features
    
    Different features need to update the cursor registers, but they
    currently use different approaches.
    
    To unify these varied methods, this refactor implements a single
    shared update-cursor-info path for all of them.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Anthony Koo <Anthony.Koo at amd.com>
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Max Tseng <Max.Tseng at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index ae13887756bf..9998f58c14b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -276,6 +276,8 @@ static void program_cursor_attributes(
 		}
 
 		dc->hwss.set_cursor_attribute(pipe_ctx);
+
+		dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
 		if (dc->hwss.set_cursor_sdr_white_level)
 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
 	}
@@ -382,6 +384,8 @@ static void program_cursor_position(
 		}
 
 		dc->hwss.set_cursor_position(pipe_ctx);
+
+		dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
 	}
 
 	if (pipe_to_program)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 89d7d3fd3321..bbde635c56fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -30,6 +30,7 @@
 #include "dc_hw_types.h"
 #include "core_types.h"
 #include "../basics/conversion.h"
+#include "cursor_reg_cache.h"
 
 #define CTX dc_dmub_srv->ctx
 #define DC_LOGGER CTX->logger
@@ -880,3 +881,147 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
 		diag_data.is_cw0_enabled,
 		diag_data.is_cw6_enabled);
 }
+
+static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
+{
+	if (pipe_ctx->plane_state != NULL) {
+		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+			return false;
+	}
+
+	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+		pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
+		pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
+		return true;
+
+	return false;
+}
+
+static void dc_build_cursor_update_payload0(
+		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
+		struct dmub_cmd_update_cursor_payload0 *payload)
+{
+	struct hubp *hubp = pipe_ctx->plane_res.hubp;
+	unsigned int panel_inst = 0;
+
+	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
+		pipe_ctx->stream->link, &panel_inst))
+		return;
+
+	/* Payload: Cursor Rect is built from position & attribute
+	 * x & y are obtained from postion
+	 */
+	payload->cursor_rect.x = hubp->cur_rect.x;
+	payload->cursor_rect.y = hubp->cur_rect.y;
+	/* w & h are obtained from attribute */
+	payload->cursor_rect.width  = hubp->cur_rect.w;
+	payload->cursor_rect.height = hubp->cur_rect.h;
+
+	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
+	payload->pipe_idx    = p_idx;
+	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+	payload->panel_inst  = panel_inst;
+}
+
+static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
+		union dmub_rb_cmd *cmd)
+{
+	dc_dmub_srv_cmd_queue(dmub_srv, cmd);
+	dc_dmub_srv_cmd_execute(dmub_srv);
+	dc_dmub_srv_wait_idle(dmub_srv);
+}
+
+static void dc_build_cursor_position_update_payload0(
+		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
+		const struct hubp *hubp, const struct dpp *dpp)
+{
+	/* Hubp */
+	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
+	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
+	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
+	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;
+
+	/* dpp */
+	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
+	pl->position_cfg.pipe_idx = p_idx;
+}
+
+static void dc_build_cursor_attribute_update_payload1(
+		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
+		const struct hubp *hubp, const struct dpp *dpp)
+{
+	/* Hubp */
+	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
+	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
+	pl_A->aHubp.size.raw     = hubp->att.size.raw;
+	pl_A->aHubp.settings.raw = hubp->att.settings.raw;
+
+	/* dpp */
+	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
+}
+
+/**
+ * ***************************************************************************************
+ * dc_send_update_cursor_info_to_dmu: Populate the DMCUB Cursor update info command
+ *
+ * This function would store the cursor related information and pass it into dmub
+ *
+ * @param [in] pCtx: pipe context
+ * @param [in] pipe_idx: pipe index
+ *
+ * @return: void
+ *
+ * ***************************************************************************************
+ */
+
+void dc_send_update_cursor_info_to_dmu(
+		struct pipe_ctx *pCtx, uint8_t pipe_idx)
+{
+	union dmub_rb_cmd cmd = { 0 };
+	union dmub_cmd_update_cursor_info_data *update_cursor_info =
+					&cmd.update_cursor_info.update_cursor_info_data;
+
+	if (!dc_dmub_should_update_cursor_data(pCtx))
+		return;
+	/*
+	 * Since we use multi_cmd_pending for dmub command, the 2nd command is
+	 * only assigned to store cursor attributes info.
+	 * 1st command can view as 2 parts, 1st is for PSR/Replay data, the other
+	 * is to store cursor position info.
+	 *
+	 * Command heaer type must be the same type if using  multi_cmd_pending.
+	 * Besides, while process 2nd command in DMU, the sub type is useless.
+	 * So it's meanless to pass the sub type header with different type.
+	 */
+
+	{
+		/* Build Payload#0 Header */
+		cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+		cmd.update_cursor_info.header.payload_bytes =
+				sizeof(cmd.update_cursor_info.update_cursor_info_data);
+		cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */
+
+		/* Prepare Payload */
+		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+
+		dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+		/* Send update_curosr_info to queue */
+		dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
+	}
+	{
+		/* Build Payload#1 Header */
+		memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
+		cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+		cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+		cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+
+		dc_build_cursor_attribute_update_payload1(
+				&cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+
+		/* Combine 2nd cmds update_curosr_info to DMU */
+		dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 7e438345b1a8..d34f5563df2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -88,4 +88,5 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
 void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
 void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
 
+void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
 #endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 897f412f539e..b9765b3899e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -469,6 +469,7 @@ void dpp1_set_cursor_position(
 	REG_UPDATE(CURSOR0_CONTROL,
 			CUR0_ENABLE, cur_en);
 
+	dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
 }
 
 void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 305e0c545374..11e4c4e46947 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3372,127 +3372,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
 	return false;
 }
 
-static bool dcn10_dmub_should_update_cursor_data(
-		struct pipe_ctx *pipe_ctx,
-		struct dc_debug_options *debug)
-{
-	if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
-		return false;
-
-	if (dcn10_can_pipe_disable_cursor(pipe_ctx))
-		return false;
-
-	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
-			&& pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
-		return true;
-
-	return false;
-}
-
-static void dcn10_dmub_update_cursor_data(
-		struct pipe_ctx *pipe_ctx,
-		struct hubp *hubp,
-		const struct dc_cursor_mi_param *param,
-		const struct dc_cursor_position *cur_pos,
-		const struct dc_cursor_attributes *cur_attr)
-{
-	union dmub_rb_cmd cmd;
-	struct dmub_cmd_update_cursor_info_data *update_cursor_info;
-	const struct dc_cursor_position *pos;
-	const struct dc_cursor_attributes *attr;
-	int src_x_offset = 0;
-	int src_y_offset = 0;
-	int x_hotspot = 0;
-	int cursor_height = 0;
-	int cursor_width = 0;
-	uint32_t cur_en = 0;
-	unsigned int panel_inst = 0;
-
-	struct dc_debug_options *debug = &hubp->ctx->dc->debug;
-
-	if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
-		return;
-	/**
-	 * if cur_pos == NULL means the caller is from cursor_set_attribute
-	 * then driver use previous cursor position data
-	 * if cur_attr == NULL means the caller is from cursor_set_position
-	 * then driver use previous cursor attribute
-	 * if cur_pos or cur_attr is not NULL then update it
-	 */
-	if (cur_pos != NULL)
-		pos = cur_pos;
-	else
-		pos = &hubp->curs_pos;
-
-	if (cur_attr != NULL)
-		attr = cur_attr;
-	else
-		attr = &hubp->curs_attr;
-
-	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))
-		return;
-
-	src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
-	src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
-	x_hotspot = pos->x_hotspot;
-	cursor_height = (int)attr->height;
-	cursor_width = (int)attr->width;
-	cur_en = pos->enable ? 1:0;
-
-	// Rotated cursor width/height and hotspots tweaks for offset calculation
-	if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
-		swap(cursor_height, cursor_width);
-		if (param->rotation == ROTATION_ANGLE_90) {
-			src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
-			src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
-		}
-	} else if (param->rotation == ROTATION_ANGLE_180) {
-		src_x_offset = pos->x - param->viewport.x;
-		src_y_offset = pos->y - param->viewport.y;
-	}
-
-	if (param->mirror) {
-		x_hotspot = param->viewport.width - x_hotspot;
-		src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
-	}
-
-	if (src_x_offset >= (int)param->viewport.width)
-		cur_en = 0;  /* not visible beyond right edge*/
-
-	if (src_x_offset + cursor_width <= 0)
-		cur_en = 0;  /* not visible beyond left edge*/
-
-	if (src_y_offset >= (int)param->viewport.height)
-		cur_en = 0;  /* not visible beyond bottom edge*/
-
-	if (src_y_offset + cursor_height <= 0)
-		cur_en = 0;  /* not visible beyond top edge*/
-
-	// Cursor bitmaps have different hotspot values
-	// There's a possibility that the above logic returns a negative value, so we clamp them to 0
-	if (src_x_offset < 0)
-		src_x_offset = 0;
-	if (src_y_offset < 0)
-		src_y_offset = 0;
-
-	memset(&cmd, 0x0, sizeof(cmd));
-	cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
-	cmd.update_cursor_info.header.payload_bytes =
-			sizeof(cmd.update_cursor_info.update_cursor_info_data);
-	update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
-	update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
-	update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
-	update_cursor_info->cursor_rect.width = attr->width;
-	update_cursor_info->cursor_rect.height = attr->height;
-	update_cursor_info->enable = cur_en;
-	update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
-	update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
-	update_cursor_info->panel_inst = panel_inst;
-	dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
-	dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
-	dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
-}
-
 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3727,7 +3606,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
 	}
 
-	dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
 }
@@ -3735,25 +3613,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
-	struct dc_cursor_mi_param param = { 0 };
-
-	/**
-	 * If enter PSR without cursor attribute update
-	 * the cursor attribute of dmub_restore_plane
-	 * are initial value. call dmub to exit PSR and
-	 * restore plane then update cursor attribute to
-	 * avoid override with initial value
-	 */
-	if (pipe_ctx->plane_state != NULL) {
-		param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
-		param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
-		param.viewport = pipe_ctx->plane_res.scl_data.viewport;
-		param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
-		param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
-		param.rotation = pipe_ctx->plane_state->rotation;
-		param.mirror = pipe_ctx->plane_state->horizontal_mirror;
-		dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);
-	}
 
 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
 			pipe_ctx->plane_res.hubp, attributes);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b1ec0e6f7f58..4996d2810edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -617,6 +617,17 @@ void hubp2_cursor_set_attributes(
 			CURSOR0_DST_Y_OFFSET, 0,
 			 /* used to shift the cursor chunk request deadline */
 			CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+	hubp->att.SURFACE_ADDR_HIGH  = attr->address.high_part;
+	hubp->att.SURFACE_ADDR       = attr->address.low_part;
+	hubp->att.size.bits.width    = attr->width;
+	hubp->att.size.bits.height   = attr->height;
+	hubp->att.cur_ctl.bits.mode  = attr->color_format;
+	hubp->att.cur_ctl.bits.pitch = hw_pitch;
+	hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+	hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+	hubp->att.settings.bits.dst_y_offset  = 0;
+	hubp->att.settings.bits.chunk_hdl_adjust = 3;
 }
 
 void hubp2_dmdata_set_attributes(
@@ -1033,6 +1044,25 @@ void hubp2_cursor_set_position(
 	REG_SET(CURSOR_DST_OFFSET, 0,
 			CURSOR_DST_X_OFFSET, dst_x_offset);
 	/* TODO Handle surface pixel formats other than 4:4:4 */
+	/* Cursor Position Register Config */
+	hubp->pos.cur_ctl.bits.cur_enable = cur_en;
+	hubp->pos.position.bits.x_pos = pos->x;
+	hubp->pos.position.bits.y_pos = pos->y;
+	hubp->pos.hot_spot.bits.x_hot = x_hotspot;
+	hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+	hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
+	/* Cursor Rectangle Cache
+	 * Cursor bitmaps have different hotspot values
+	 * There's a possibility that the above logic returns a negative value,
+	 * so we clamp them to 0
+	 */
+	if (src_x_offset < 0)
+		src_x_offset = 0;
+	if (src_y_offset < 0)
+		src_y_offset = 0;
+	/* Save necessary cursor info: x, y position. w, h are saved in the attribute func. */
+	hubp->cur_rect.x = src_x_offset + param->viewport.x;
+	hubp->cur_rect.y = src_y_offset + param->viewport.y;
 }
 
 void hubp2_clk_cntl(struct hubp *hubp, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 4a668d6563df..e5b7ef7422b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -372,6 +372,10 @@ void dpp3_set_cursor_attributes(
 		REG_UPDATE(CURSOR0_COLOR1,
 				CUR0_COLOR1, 0xFFFFFFFF);
 	}
+
+	dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
+	dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en;
+	dpp_base->att.cur0_ctl.bits.mode = color_format;
 }
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
new file mode 100644
index 000000000000..0e7c5880e867
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -0,0 +1,98 @@
+/* Copyright © 2022 Advanced Micro Devices, Inc. All rights reserved. */
+
+#ifndef __DAL_CURSOR_CACHE_H__
+#define __DAL_CURSOR_CACHE_H__
+
+union reg_cursor_control_cfg {
+	struct {
+		uint32_t     cur_enable: 1;
+		uint32_t         reser0: 3;
+		uint32_t cur_2x_magnify: 1;
+		uint32_t         reser1: 3;
+		uint32_t           mode: 3;
+		uint32_t         reser2: 5;
+		uint32_t          pitch: 2;
+		uint32_t         reser3: 6;
+		uint32_t line_per_chunk: 5;
+		uint32_t         reser4: 3;
+	} bits;
+	uint32_t raw;
+};
+struct cursor_position_cache_hubp {
+	union reg_cursor_control_cfg cur_ctl;
+	union reg_position_cfg {
+		struct {
+			uint32_t x_pos: 16;
+			uint32_t y_pos: 16;
+		} bits;
+		uint32_t raw;
+	} position;
+	union reg_hot_spot_cfg {
+		struct {
+			uint32_t x_hot: 16;
+			uint32_t y_hot: 16;
+		} bits;
+		uint32_t raw;
+	} hot_spot;
+	union reg_dst_offset_cfg {
+		struct {
+			uint32_t dst_x_offset: 13;
+			uint32_t     reserved: 19;
+		} bits;
+		uint32_t raw;
+	} dst_offset;
+};
+
+struct cursor_attribute_cache_hubp {
+	uint32_t SURFACE_ADDR_HIGH;
+	uint32_t SURFACE_ADDR;
+	union    reg_cursor_control_cfg  cur_ctl;
+	union    reg_cursor_size_cfg {
+		struct {
+			uint32_t  width: 16;
+			uint32_t height: 16;
+		} bits;
+		uint32_t raw;
+	} size;
+	union    reg_cursor_settings_cfg {
+		struct {
+			uint32_t     dst_y_offset: 8;
+			uint32_t chunk_hdl_adjust: 2;
+			uint32_t         reserved: 22;
+		} bits;
+		uint32_t raw;
+	} settings;
+};
+
+struct cursor_rect {
+	uint32_t x;
+	uint32_t y;
+	uint32_t w;
+	uint32_t h;
+};
+
+union reg_cur0_control_cfg {
+	struct {
+		uint32_t     cur0_enable: 1;
+		uint32_t  expansion_mode: 1;
+		uint32_t          reser0: 1;
+		uint32_t     cur0_rom_en: 1;
+		uint32_t            mode: 3;
+		uint32_t        reserved: 25;
+	} bits;
+	uint32_t raw;
+};
+struct cursor_position_cache_dpp {
+	union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attribute_cache_dpp {
+	union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attributes_cfg {
+	struct  cursor_attribute_cache_hubp aHubp;
+	struct  cursor_attribute_cache_dpp  aDpp;
+};
+
+#endif
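
Since these cache unions mirror hardware register layouts bit-for-bit and are copied into DMUB payloads via their .raw members, each one must stay exactly 32 bits wide. A hedged, illustrative compile-time check (not part of the patch) could look like this:

/* Illustrative sanity checks only; assumes a C11 toolchain. */
#include <stdint.h>
#include "cursor_reg_cache.h"

_Static_assert(sizeof(union reg_cursor_control_cfg) == sizeof(uint32_t),
	       "CURSOR_CONTROL cache must match the 32-bit register");
_Static_assert(sizeof(union reg_cur0_control_cfg) == sizeof(uint32_t),
	       "CUR0_CONTROL cache must match the 32-bit register");
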
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 3ef7faa92052..dcb80c4747b0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -28,6 +28,7 @@
 #define __DAL_DPP_H__
 
 #include "transform.h"
+#include "cursor_reg_cache.h"
 
 union defer_reg_writes {
 	struct {
@@ -58,6 +59,9 @@ struct dpp {
 
 	struct pwl_params shaper_params;
 	bool cm_bypass_mode;
+
+	struct cursor_position_cache_dpp  pos;
+	struct cursor_attribute_cache_dpp att;
 };
 
 struct dpp_input_csc_matrix {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 44c4578193a3..d5ea7545583e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -27,6 +27,7 @@
 #define __DAL_HUBP_H__
 
 #include "mem_input.h"
+#include "cursor_reg_cache.h"
 
 #define OPP_ID_INVALID 0xf
 #define MAX_TTU 0xffffff
@@ -65,6 +66,10 @@ struct hubp {
 	struct dc_cursor_attributes curs_attr;
 	struct dc_cursor_position curs_pos;
 	bool power_gated;
+
+	struct cursor_position_cache_hubp  pos;
+	struct cursor_attribute_cache_hubp att;
+	struct cursor_rect cur_rect;
 };
 
 struct surface_flip_registers {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 5d1aadade8a5..834707dfc189 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -760,11 +760,6 @@ enum dmub_cmd_dpia_type {
 	DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
 };
 
-enum dmub_cmd_header_sub_type {
-	DMUB_CMD__SUB_TYPE_GENERAL         = 0,
-	DMUB_CMD__SUB_TYPE_CURSOR_POSITION = 1
-};
-
 #pragma pack(push, 1)
 
 /**
@@ -2089,7 +2084,99 @@ struct dmub_rb_cmd_update_dirty_rect {
 /**
  * Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
  */
-struct dmub_cmd_update_cursor_info_data {
+union dmub_reg_cursor_control_cfg {
+	struct {
+		uint32_t     cur_enable: 1;
+		uint32_t         reser0: 3;
+		uint32_t cur_2x_magnify: 1;
+		uint32_t         reser1: 3;
+		uint32_t           mode: 3;
+		uint32_t         reser2: 5;
+		uint32_t          pitch: 2;
+		uint32_t         reser3: 6;
+		uint32_t line_per_chunk: 5;
+		uint32_t         reser4: 3;
+	} bits;
+	uint32_t raw;
+};
+struct dmub_cursor_position_cache_hubp {
+	union dmub_reg_cursor_control_cfg cur_ctl;
+	union dmub_reg_position_cfg {
+		struct {
+			uint32_t cur_x_pos: 16;
+			uint32_t cur_y_pos: 16;
+		} bits;
+		uint32_t raw;
+	} position;
+	union dmub_reg_hot_spot_cfg {
+		struct {
+			uint32_t hot_x: 16;
+			uint32_t hot_y: 16;
+		} bits;
+		uint32_t raw;
+	} hot_spot;
+	union dmub_reg_dst_offset_cfg {
+		struct {
+			uint32_t dst_x_offset: 13;
+			uint32_t reserved: 19;
+		} bits;
+		uint32_t raw;
+	} dst_offset;
+};
+
+union dmub_reg_cur0_control_cfg {
+	struct {
+		uint32_t     cur0_enable: 1;
+		uint32_t  expansion_mode: 1;
+		uint32_t          reser0: 1;
+		uint32_t     cur0_rom_en: 1;
+		uint32_t            mode: 3;
+		uint32_t        reserved: 25;
+	} bits;
+	uint32_t raw;
+};
+struct dmub_cursor_position_cache_dpp {
+	union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_position_cfg {
+	struct  dmub_cursor_position_cache_hubp pHubp;
+	struct  dmub_cursor_position_cache_dpp  pDpp;
+	uint8_t pipe_idx;
+	/*
+	 * Padding is required to keep the struct 4-byte aligned.
+	 */
+	uint8_t padding[3];
+};
+
+struct dmub_cursor_attribute_cache_hubp {
+	uint32_t SURFACE_ADDR_HIGH;
+	uint32_t SURFACE_ADDR;
+	union    dmub_reg_cursor_control_cfg  cur_ctl;
+	union    dmub_reg_cursor_size_cfg {
+		struct {
+			uint32_t width: 16;
+			uint32_t height: 16;
+		} bits;
+		uint32_t raw;
+	} size;
+	union    dmub_reg_cursor_settings_cfg {
+		struct {
+			uint32_t     dst_y_offset: 8;
+			uint32_t chunk_hdl_adjust: 2;
+			uint32_t         reserved: 22;
+		} bits;
+		uint32_t raw;
+	} settings;
+};
+struct dmub_cursor_attribute_cache_dpp {
+	union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_attributes_cfg {
+	struct  dmub_cursor_attribute_cache_hubp aHubp;
+	struct  dmub_cursor_attribute_cache_dpp  aDpp;
+};
+
+struct dmub_cmd_update_cursor_payload0 {
 	/**
 	 * Cursor dirty rects.
 	 */
@@ -2116,6 +2203,20 @@ struct dmub_cmd_update_cursor_info_data {
 	 * Currently the support is only for 0 or 1
 	 */
 	uint8_t panel_inst;
+	/**
+	 * Cursor position registers.
+	 * Contains registers for the Hubp & Dpp modules.
+	 */
+	struct dmub_cursor_position_cfg position_cfg;
+};
+
+struct dmub_cmd_update_cursor_payload1 {
+	struct dmub_cursor_attributes_cfg attribute_cfg;
+};
+
+union dmub_cmd_update_cursor_info_data {
+	struct dmub_cmd_update_cursor_payload0 payload0;
+	struct dmub_cmd_update_cursor_payload1 payload1;
 };
 /**
  * Definition of a DMUB_CMD__UPDATE_CURSOR_INFO command.
@@ -2128,7 +2229,7 @@ struct dmub_rb_cmd_update_cursor_info {
 	/**
 	 * Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
 	 */
-	struct dmub_cmd_update_cursor_info_data update_cursor_info_data;
+	union dmub_cmd_update_cursor_info_data update_cursor_info_data;
 };
 
 /**
commit 3867bbd44f2894a4e2b01286b3b378c058992cd7
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Sat Oct 1 11:51:48 2022 -0400

    drm/amd/display: Fix bug preventing FCLK Pstate allow message being sent
    
    [Why & How]
    FCLK pstate allow message should not be dependent on local
    "update_fclk".
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Martin Leung <Martin.Leung at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 99ae3255dcb9..1c612ccf1944 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -342,8 +342,8 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 		if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
 			clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
 
-			/* To enable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
-			if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+			/* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
+			if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support) {
 				/* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
 				dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
 			}
commit d6170e418d1d3ae7e98cb6d96d1444e880131bbf
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Wed Sep 28 15:44:38 2022 -0400

    drm/amd/display: Acquire FCLK DPM levels on DCN32
    
    [Why & How]
    Acquire FCLK DPM levels to properly construct DML clock limits. Further
    add new logic to keep number of indices for each clock in clk_mgr.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 96d5e0d5b3ce..99ae3255dcb9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,7 +156,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
 {
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	unsigned int num_levels;
-	unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
+	struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
 
 	memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
 	clk_mgr_base->clks.p_state_change_support = true;
@@ -180,27 +180,28 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
 	/* DCFCLK */
 	dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
 			&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
-			&num_levels);
-	num_dcfclk_levels = num_levels;
+			&num_entries_per_clk->num_dcfclk_levels);
 
 	/* SOCCLK */
 	dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
 					&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
-					&num_levels);
+					&num_entries_per_clk->num_socclk_levels);
+
 	/* DTBCLK */
 	if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
 		dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
 				&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
-				&num_levels);
-	num_dtbclk_levels = num_levels;
+				&num_entries_per_clk->num_dtbclk_levels);
 
 	/* DISPCLK */
 	dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
 			&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
-			&num_levels);
-	num_dispclk_levels = num_levels;
+			&num_entries_per_clk->num_dispclk_levels);
+	num_levels = num_entries_per_clk->num_dispclk_levels;
 
-	if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+	if (num_entries_per_clk->num_dcfclk_levels &&
+			num_entries_per_clk->num_dtbclk_levels &&
+			num_entries_per_clk->num_dispclk_levels)
 		clk_mgr->dpm_present = true;
 
 	if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
@@ -383,7 +384,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 			/* to disable P-State switching, set UCLK min = max */
 			if (!clk_mgr_base->clks.p_state_change_support)
 				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
-						clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+						clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
 		}
 
 		/* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
@@ -634,7 +635,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
 					khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
 		else
 			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
-					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
 	} else {
 		dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
 				clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
@@ -650,22 +651,34 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
 		return;
 
 	dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
-			clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+			clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
 }
 
 /* Get current memclk states, update bounding box */
 static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
 {
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+	struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
 	unsigned int num_levels;
 
 	if (!clk_mgr->smu_present)
 		return;
 
-	/* Refresh memclk states */
+	/* Refresh memclk and fclk states */
 	dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
 			&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
-			&num_levels);
+			&num_entries_per_clk->num_memclk_levels);
+
+	dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
+			&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+			&num_entries_per_clk->num_fclk_levels);
+
+	if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
+		num_levels = num_entries_per_clk->num_memclk_levels;
+	} else {
+		num_levels = num_entries_per_clk->num_fclk_levels;
+	}
+
 	clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
 
 	if (clk_mgr->dpm_present && !num_levels)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index d9f1b0a4fbd4..591ab1389e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -95,10 +95,23 @@ struct clk_limit_table_entry {
 	unsigned int wck_ratio;
 };
 
+struct clk_limit_num_entries {
+	unsigned int num_dcfclk_levels;
+	unsigned int num_fclk_levels;
+	unsigned int num_memclk_levels;
+	unsigned int num_socclk_levels;
+	unsigned int num_dtbclk_levels;
+	unsigned int num_dispclk_levels;
+	unsigned int num_dppclk_levels;
+	unsigned int num_phyclk_levels;
+	unsigned int num_phyclk_d18_levels;
+};
+
 /* This table is contiguous */
 struct clk_limit_table {
 	struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
-	unsigned int num_entries;
+	struct clk_limit_num_entries num_entries_per_clk;
+	unsigned int num_entries; /* highest populated dpm level for backwards compatibility */
 };
 
 struct wm_range_table_entry {
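
As a rough illustration of how the new per-clock counts are meant to be consumed (a hypothetical helper, not in the patch), callers can now index the last DPM entry of a specific clock type instead of relying on the single shared num_entries:

/* Hypothetical helper: highest UCLK (memclk) DPM level in MHz, or 0 if none. */
static unsigned int example_max_memclk_mhz(const struct clk_limit_table *table)
{
	unsigned int levels = table->num_entries_per_clk.num_memclk_levels;

	return levels ? table->entries[levels - 1].memclk_mhz : 0;
}
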
commit 876fcc4222e1d0e5b73343f4010a8b66be058f48
Author: Fangzhi Zuo <Jerry.Zuo at amd.com>
Date:   Tue Aug 30 12:12:53 2022 -0400

    drm/amd/display: Validate DSC After Enable All New CRTCs
    
    Before enabling new crtc, stream_count in dc_state does not sync with
    that in drm_atomic_state. Validating dsc in such case would leave newly
    added stream not jointly participating in dsc optimization with existing
    streams, but simply using default initialized vcpi all the time which
    gives wrong dsc determination decision.
    
    Consider the scenario where one 4k60 display is connected to the dock
    under dp-alt mode. Since dp-alt mode is a 2-lane setup, stream 1
    consumes 63 slots with dsc needed. Then hook up a second 4k60 to the
    dock. Stream 2 comes up with 65 slots initialized by default, without
    dsc. dsc pre-validation will not jointly optimize stream 2 with stream 1
    before crtc 2 is added into the dc_state. That leads to stream 2 not
    getting dsc optimization and triggers atomic_check failure all the time,
    as it exceeds the 63-slot limit (65 > 63).
    
    After getting all new crtcs added into the state, stream_count in
    dc_state correctly reflect that in drm_atomic_state which comes up with
    correct dsc decision.
    
    Fixes: 71be4b16d39a ("drm/amd/display: dsc validate fail not pass to atomic check")
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Roman Li <Roman.Li at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Fangzhi Zuo <Jerry.Zuo at amd.com>
    Tested-by: Mark Broadworth <mark.broadworth at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b84aedb707b8..f6a9e8fdd87d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9390,10 +9390,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				}
 			}
 		}
-		if (!pre_validate_dsc(state, &dm_state, vars)) {
-			ret = -EINVAL;
-			goto fail;
-		}
 	}
 #endif
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -9527,6 +9523,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		}
 	}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dc_resource_is_dsc_encoding_supported(dc)) {
+		if (!pre_validate_dsc(state, &dm_state, vars)) {
+			ret = -EINVAL;
+			goto fail;
+		}
+	}
+#endif
+
 	/* Run this here since we want to validate the streams we created */
 	ret = drm_atomic_helper_check_planes(dev, state);
 	if (ret) {
commit 20dad3813b3c15d118bda0496711eb7dff98e74a
Author: Jun Lei <jun.lei at amd.com>
Date:   Thu Sep 29 15:47:31 2022 -0400

    drm/amd/display: Add a helper to map ODM/MPC/Multi-Plane resources
    
    [Why & How]
    Add a helper to map ODM/MPC/Multi-Plane resources from DC
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Nevenko Stupar <Nevenko.Stupar at amd.com>
    Reviewed-by: Chaitanya Dhere <chaitanya.dhere at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Jun Lei <jun.lei at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8ee0d946bb2f..4a6e867369b8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1747,7 +1747,6 @@ bool dc_remove_plane_from_context(
 
 	for (i = 0; i < stream_status->plane_count; i++) {
 		if (stream_status->plane_states[i] == plane_state) {
-
 			dc_plane_state_release(stream_status->plane_states[i]);
 			break;
 		}
@@ -3683,4 +3682,52 @@ bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
 				(stream->timing.h_sync_width % 2 == 0);
 	}
 	return divisible;
+}
+
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+		const struct dc *dc,
+		struct dc_state *state,
+		struct pipe_ctx *pri_pipe,
+		struct pipe_ctx *sec_pipe,
+		bool odm)
+{
+	int pipe_idx = sec_pipe->pipe_idx;
+	struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev;
+	const struct resource_pool *pool = dc->res_pool;
+
+	sec_top = sec_pipe->top_pipe;
+	sec_bottom = sec_pipe->bottom_pipe;
+	sec_next = sec_pipe->next_odm_pipe;
+	sec_prev = sec_pipe->prev_odm_pipe;
+
+	*sec_pipe = *pri_pipe;
+
+	sec_pipe->top_pipe = sec_top;
+	sec_pipe->bottom_pipe = sec_bottom;
+	sec_pipe->next_odm_pipe = sec_next;
+	sec_pipe->prev_odm_pipe = sec_prev;
+
+	sec_pipe->pipe_idx = pipe_idx;
+	sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+	sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+	sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+	sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+	sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+	sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+	sec_pipe->stream_res.dsc = NULL;
+	if (odm) {
+		if (!sec_pipe->top_pipe)
+			sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+		else
+			sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+		if (sec_pipe->stream->timing.flags.DSC == 1) {
+			dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+			ASSERT(sec_pipe->stream_res.dsc);
+			if (sec_pipe->stream_res.dsc == NULL)
+				return false;
+		}
+		dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
+	}
+
+	return true;
 }
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 66b7482d2e72..b0afcff94591 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1191,6 +1191,8 @@ struct dc_plane_state {
 	enum dc_irq_source irq_source;
 	struct kref refcount;
 	struct tg_color visual_confirm_color;
+
+	bool is_statically_allocated;
 };
 
 struct dc_plane_info {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 819de0f11012..2a3f5a485b2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1372,7 +1372,7 @@ static struct pipe_ctx *dcn32_find_split_pipe(
 	return pipe;
 }
 
-static bool dcn32_split_stream_for_mpc_or_odm(
+bool dcn32_split_stream_for_mpc_or_odm(
 		const struct dc *dc,
 		struct resource_context *res_ctx,
 		struct pipe_ctx *pri_pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 1fd7ad853210..9498105c98ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,6 +39,8 @@
 #include "panel_cntl.h"
 
 #define MAX_CLOCK_SOURCES 7
+#define MAX_SVP_PHANTOM_STREAMS 2
+#define MAX_SVP_PHANTOM_PLANES 2
 
 void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
 		uint32_t controller_id);
@@ -492,6 +494,8 @@ struct dcn_bw_output {
 	struct dcn_watermark_set watermarks;
 	struct dcn_bw_writeback bw_writeback;
 	int compbuf_size_kb;
+	unsigned int legacy_svp_drr_stream_index;
+	bool legacy_svp_drr_stream_index_valid;
 };
 
 union bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c37d1141febe..5040836f404d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -230,4 +230,10 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
 
 bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
 
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+		const struct dc *dc,
+		struct dc_state *state,
+		struct pipe_ctx *pri_pipe,
+		struct pipe_ctx *sec_pipe,
+		bool odm);
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
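
A hedged usage sketch of the new helper (hypothetical wrapper, not from the patch): a caller that has already picked a free secondary pipe mirrors the primary pipe's resources into it, requesting ODM rather than MPC combine:

/* Hypothetical wrapper around the new helper; error handling elided. */
static bool example_acquire_odm_secondary(const struct dc *dc,
					  struct dc_state *state,
					  struct pipe_ctx *pri_pipe,
					  struct pipe_ctx *sec_pipe)
{
	/* true selects the ODM path, which also maps OPP/DSC resources. */
	return dc_resource_acquire_secondary_pipe_for_mpc_odm(dc, state,
							       pri_pipe,
							       sec_pipe, true);
}
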
commit fa28030a83a6302f8724cdbf0c477536b2101033
Author: Vladimir Stempen <vladimir.stempen at amd.com>
Date:   Thu Sep 29 13:32:50 2022 -0400

    drm/amd/display: increase hardware status wait time
    
    [Why]
    Diagnostics reports exceptions generated when the timeout waiting for the
    DISPCLK frequency divider change expires while testing ODM4to1, and when
    the timeout waiting for OTG busy status expires when disabling OTG during
    the ODM4to1 test.
    
    [How]
    Increase the HW status wait time for the DISPCLK frequency divider change
    and for the OTG busy status when disabling OTG.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Ariel Bernstein <Eric.Bernstein at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Vladimir Stempen <vladimir.stempen at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d30d1d9d67e..650f3b4b562e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -179,7 +179,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
 	} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
 		REG_UPDATE(DENTIST_DISPCLK_CNTL,
 				DENTIST_DISPCLK_WDIVIDER, 126);
-		REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
+		REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
 		for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 			struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -206,7 +206,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
 
 	REG_UPDATE(DENTIST_DISPCLK_CNTL,
 			DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
-	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
+	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
 	REG_UPDATE(DENTIST_DISPCLK_CNTL,
 			DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
 	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index ec3989d37086..2b33eeb213e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -151,7 +151,7 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
 	/* CRTC disabled, so disable  clock. */
 	REG_WAIT(OTG_CLOCK_CONTROL,
 			OTG_BUSY, 0,
-			1, 100000);
+			1, 150000);
 
 	return true;
 }
commit e50f67cf5e168d92e24cfb61fb11f2f0a35708cd
Author: Aurabindo Pillai <aurabindo.pillai at amd.com>
Date:   Thu Sep 29 11:15:12 2022 -0400

    drm/amd/display: Do not trigger timing sync for phantom pipes
    
    [Why&How]
    The timing sync sequence for phantom pipes will not go through since
    they are not fully programmed like normal pipes. Skip the sequence on
    such pipes.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f4b3ec32a331..305e0c545374 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2244,6 +2244,9 @@ void dcn10_enable_timing_synchronization(
 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
 
 	for (i = 1; i < group_size; i++) {
+		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+			continue;
+
 		opp = grouped_pipes[i]->stream_res.opp;
 		tg = grouped_pipes[i]->stream_res.tg;
 		tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -2254,13 +2257,21 @@ void dcn10_enable_timing_synchronization(
 	for (i = 0; i < group_size; i++) {
 		if (grouped_pipes[i]->stream == NULL)
 			continue;
+
+		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+			continue;
+
 		grouped_pipes[i]->stream->vblank_synchronized = false;
 	}
 
-	for (i = 1; i < group_size; i++)
+	for (i = 1; i < group_size; i++) {
+		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+			continue;
+
 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
 				grouped_pipes[i]->stream_res.tg,
 				grouped_pipes[0]->stream_res.tg->inst);
+	}
 
 	DC_SYNC_INFO("Waiting for trigger\n");
 
@@ -2268,12 +2279,21 @@ void dcn10_enable_timing_synchronization(
 	 * synchronized. Look at last pipe programmed to reset.
 	 */
 
-	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
-	for (i = 1; i < group_size; i++)
+	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+
+	for (i = 1; i < group_size; i++) {
+		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+			continue;
+
 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
 				grouped_pipes[i]->stream_res.tg);
+	}
 
 	for (i = 1; i < group_size; i++) {
+		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+			continue;
+
 		opp = grouped_pipes[i]->stream_res.opp;
 		tg = grouped_pipes[i]->stream_res.tg;
 		tg->funcs->get_otg_active_size(tg, &width, &height);
commit c19d3eace484ca5627817a1de85af1de06d538b6
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Wed Sep 28 16:33:47 2022 -0400

    drm/amd/display: Use correct pixel clock to program DTBCLK DTO's
    
    [Why?]
    Currently phy_pix_clk is used to program the DTOs, which is incorrect.
    
    [How?]
    Use the timing pixel clock to program DTO's correctly.
    
    Reviewed-by: Martin Leung <Martin.Leung at amd.com>
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 7d3147175ca2..153a88381f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -111,7 +111,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
 	enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
 
 	dto_params.otg_inst = tg->inst;
-	dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+	dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
 	dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
 	dto_params.timing = &pipe_ctx->stream->timing;
 	dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
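
The unit conversion behind the fix, spelled out (illustrative numbers only): pix_clk_100hz stores the pixel clock in units of 100 Hz, so dividing by 10 yields kHz, which is what the DTO programming expects.

#include <stdint.h>

/* Illustrative only: convert a 594 MHz pixel clock from 100 Hz units to kHz. */
static uint32_t example_pixclk_khz(void)
{
	uint32_t pix_clk_100hz = 5940000;   /* 594,000,000 Hz expressed in 100 Hz units */

	return pix_clk_100hz / 10;          /* 594,000 kHz */
}
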
commit 2fd23d467d4fb4e9bb3c3758ee49799f690f5f72
Author: Josip Pavic <Josip.Pavic at amd.com>
Date:   Fri Sep 23 15:29:07 2022 -0400

    drm/amd/display: do not compare integers of different widths
    
    [Why & How]
    Increase width of some variables to avoid comparing integers of
    different widths
    
    Reviewed-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Josip Pavic <Josip.Pavic at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 955ca273cfe1..426b07edb426 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -206,8 +206,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
  */
 static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
 {
-	uint8_t i;
-	int j;
+	int i, j;
 	struct dc_stream_state *stream = NULL;
 	struct dc_plane_state *plane = NULL;
 	uint32_t cursor_size = 0;
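
A standalone sketch of the warning class being fixed (assumed example, not taken from the driver): a uint8_t loop counter compared against a wider int bound wraps at 256, so besides the mixed-width comparison warning the loop can spin forever; widening the counter, as the patch does, avoids both.

#include <stdint.h>

/* With `uint8_t i` this would never terminate for limit > 255, because the
 * counter wraps back to 0; compilers also warn about the mixed-width compare.
 * Using a plain int counter, as the patch does, resolves both issues.
 */
static int example_count_up_to(int limit)
{
	int i, n = 0;	/* was effectively: uint8_t i */

	for (i = 0; i < limit; i++)
		n++;

	return n;
}
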
commit 5ff32b52995155f91de582124485d0f0f8881363
Author: Martin Leung <Martin.Leung at amd.com>
Date:   Tue Sep 27 18:13:38 2022 -0400

    drm/amd/display: zeromem mypipe heap struct before using it
    
    [Why & How]
    The bug was caused when moving the variable from the stack to the
    heap: the allocation is reused and garbage was left over from the
    previous pass, so we need to zero the memory before use.
    
    Fixes: 7acc487ab57e ("drm/amd/display: reduce stack size in dcn32 dml (v2)")
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Martin Leung <Martin.Leung at amd.com>
    Cc: Alex Deucher <alexander.deucher at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 11d5750e15af..5b91660a6496 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -733,6 +733,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
 				mode_lib->vba.FCLKChangeLatency, v->UrgentLatency,
 				mode_lib->vba.SREnterPlusExitTime);
 
+			memset(&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, 0, sizeof(DmlPipe));
+
 			v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dppclk = mode_lib->vba.DPPCLK[k];
 			v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dispclk = mode_lib->vba.DISPCLK;
 			v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
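
The general pattern behind the fix (hedged illustration, not the DML code itself): once a scratch struct moves from the stack into a long-lived heap allocation, it is no longer freshly initialized on every use, so each pass has to memset it before populating only the fields it needs.

#include <string.h>

/* Hypothetical reusable scratch area backed by a heap allocation. */
struct example_scratch {
	int dppclk;
	int dispclk;
	int pixel_clock;
};

static void example_use_scratch(struct example_scratch *s, int dppclk)
{
	/* Clear first so stale values from the previous pass cannot leak in. */
	memset(s, 0, sizeof(*s));
	s->dppclk = dppclk;	/* only a subset of fields is set per pass */
}
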
commit eae2331899f9dcc923d37d1d753f2de847c92359
Author: Vladimir Stempen <vladimir.stempen at amd.com>
Date:   Thu Sep 22 15:03:05 2022 -0400

    drm/amd/display: properly configure DCFCLK when enable/disable Freesync
    
    [Why]
    Bandwidth validation is using Freesync parameters
    from the previous Freesync state, and it ignores the
    DCFCLK calculated after the Freesync parameters are
    configured.
    
    [How]
    Set Freesync bandwidth parameters to their default
    state before running bandwidth validation.
    Take DCFCLK calculated after Freesync bandwidth
    parameters are assigned and bandwidth is
    recalculated.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Martin Leung <Martin.Leung at amd.com>
    Reviewed-by: Nevenko Stupar <Nevenko.Stupar at amd.com>
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Vladimir Stempen <vladimir.stempen at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 9585b25f10e5..a88dd7b3d1c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1805,6 +1805,13 @@ bool dcn32_validate_bandwidth(struct dc *dc,
 	int pipe_cnt = 0;
 	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
 	struct mall_temp_config mall_temp_config;
+
+	/* To handle Freesync properly, set FreeSync DML parameters
+	 * to their default state for the first stage of validation.
+	 */
+	context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+	context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+
 	DC_LOGGER_INIT(dc->ctx->logger);
 
 	/* For fast validation, there are situations where a shallow copy of
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6bdd509d292a..819de0f11012 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1769,6 +1769,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	int i, pipe_idx, vlevel_temp = 0;
 	double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
 	double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+	double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
 	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
 			dm_dram_clock_change_unsupported;
 	unsigned int dummy_latency_index = 0;
@@ -1804,7 +1805,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 					dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
 			dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
 			maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
-			dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+			dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
 			pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] !=
 					dm_dram_clock_change_unsupported;
 		}
@@ -1890,6 +1891,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
 
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+		pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
+	}
+
 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
 		min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
 		min_dram_speed_mts_margin = 160;
commit 99243fd1f3ca40d487209ac76241de0478962a9d
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Tue Sep 27 12:35:10 2022 -0400

    Revert "drm/amd/display: skip commit minimal transition state"
    
    This reverts commit e4e481e4d838f30985dd46d43ed195110ed265f5.
    
    [Why & How]
    The reverted commit creates a memory leak and causes an issue
    upon driver install.
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Martin Leung <Martin.Leung at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 40a34b600c8e..b5058a2ce7e8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3650,30 +3650,10 @@ static bool commit_minimal_transition_state(struct dc *dc,
 	bool temp_subvp_policy;
 	enum dc_status ret = DC_ERROR_UNEXPECTED;
 	unsigned int i, j;
-	unsigned int pipe_in_use = 0;
 
 	if (!transition_context)
 		return false;
 
-	/* check current pipes in use*/
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
-
-		if (pipe->plane_state)
-			pipe_in_use++;
-	}
-
-	/* When the OS add a new surface if we have been used all of pipes with odm combine
-	 * and mpc split feature, it need use commit_minimal_transition_state to transition safely.
-	 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
-	 * call it again. Otherwise return true to skip.
-	 *
-	 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
-	 * enter/exit MPO when DCN still have enough resources.
-	 */
-	if (pipe_in_use != dc->res_pool->pipe_count)
-		return true;
-
 	if (!dc->config.is_vmin_only_asic) {
 		tmp_mpc_policy = dc->debug.pipe_split_policy;
 		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
commit 4f5bdde386d3b8e9317df5562950e1b4fa177599
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Fri Sep 9 15:24:55 2022 -0400

    drm/amd/display: Update PMFW z-state interface for DCN314
    
    [Why]
    Request from PMFW to change the messaging format to specify whether we
    support z-state via individual bits.
    
    [How]
    Update the args we pass in the support message.
    
    Fixes: d5c6909e7460 ("drm/amd/display: Add DCN314 clock manager")
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Charlene Liu <Charlene.Liu at amd.com>
    Reviewed-by: Mario Limonciello <mario.limonciello at amd.com>
    Acked-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 6.0

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 897105d1c111..ef0795b14a1f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -339,29 +339,24 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
 	if (!clk_mgr->smu_present)
 		return;
 
-	if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
-			(support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
-		support = DCN_ZSTATE_SUPPORT_DISALLOW;
-
-
 	// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
 	// Arg[16] = Disallow Z9 -> new bit
 	switch (support) {
 
 	case DCN_ZSTATE_SUPPORT_ALLOW:
 		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
-		param = 9;
+		param = (1 << 10) | (1 << 9) | (1 << 8);
 		break;
 
 	case DCN_ZSTATE_SUPPORT_DISALLOW:
 		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
-		param = 8;
+		param = 0;
 		break;
 
 
 	case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
 		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
-		param = 0x00010008;
+		param = (1 << 10);
 		break;
 
 	default: //DCN_ZSTATE_SUPPORT_UNKNOWN
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 70b647b9b4d3..d0ad72caead2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -881,7 +881,8 @@ static const struct dc_plane_cap plane_cap = {
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
-	.disable_z10 = true, /*hw not support it*/
+	.disable_z10 = false,
+	.enable_z9_disable_interface = true,
 	.disable_dmcu = true,
 	.force_abm_enable = false,
 	.timing_trace = false,
commit 6094b9136ca9038b61e9c4b5d25cd5512ce50b34
Author: Shirish S <shirish.s at amd.com>
Date:   Fri Oct 7 20:31:49 2022 +0530

    drm/amd/display: explicitly disable psr_feature_enable appropriately
    
    [Why]
    If psr_feature_enable is set to true by default, it continues to be enabled
    for non-capable links.
    
    [How]
    Explicitly disable the feature on links that do not support it.
    
    Fixes: 8c322309e48e9 ("drm/amd/display: Enable PSR")
    Signed-off-by: Shirish S <shirish.s at amd.com>
    Reviewed-by: Leo Li <sunpeng.li at amd.com>
    Reviewed-by: Mario Limonciello <mario.limonciello at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 5.15+

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 8ca10ab3dfc1..26291db0a3cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -60,11 +60,15 @@ static bool link_supports_psrsu(struct dc_link *link)
  */
 void amdgpu_dm_set_psr_caps(struct dc_link *link)
 {
-	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+	if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
+		link->psr_settings.psr_feature_enabled = false;
 		return;
+	}
 
-	if (link->type == dc_connection_none)
+	if (link->type == dc_connection_none) {
+		link->psr_settings.psr_feature_enabled = false;
 		return;
+	}
 
 	if (link->dpcd_caps.psr_info.psr_version == 0) {
 		link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
commit c5e595e752b3a1c68cca57c3559521237332fbec
Author: Matthew Auld <matthew.auld at intel.com>
Date:   Tue Oct 4 14:19:13 2022 +0100

    drm/i915/display: handle migration for dpt
    
    On discrete platforms like DG2, it looks like the dpt path here is
    missing the migrate-to-lmem step.
    
    v2:
      - Move the vma_pin() under the for_i915_gem_ww(), otherwise the
        object can be moved after dropping the lock and then doing the pin.
    
    Fixes: 33e7a975103c ("drm/i915/xelpd: First stab at DPT support")
    Signed-off-by: Matthew Auld <matthew.auld at intel.com>
    Cc: Jianshui Yu <jianshui.yu at intel.com>
    Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Cc: Nirmoy Das <nirmoy.das at intel.com>
    Reviewed-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221004131916.233474-2-matthew.auld@intel.com
    (cherry picked from commit 5769f64ff09aab23a9045fa13b464fb5070d3fb2)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c86e5d4ee016..733972fab07f 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_gem_ww_ctx ww;
 	struct i915_vma *vma;
 	u32 alignment;
 	int ret;
 
+	/*
+	 * We are not syncing against the binding (and potential migrations)
+	 * below, so this vm must never be async.
+	 */
+	GEM_WARN_ON(vm->bind_async_flags);
+
 	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
 		return ERR_PTR(-EINVAL);
 
@@ -37,29 +44,37 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
 
 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (!ret) {
+	for_i915_gem_ww(&ww, ret, true) {
+		ret = i915_gem_object_lock(obj, &ww);
+		if (ret)
+			continue;
+
+		if (HAS_LMEM(dev_priv)) {
+			ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
+			if (ret)
+				continue;
+		}
+
 		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
-		i915_gem_object_unlock(obj);
-	}
-	if (ret) {
-		vma = ERR_PTR(ret);
-		goto err;
-	}
+		if (ret)
+			continue;
 
-	vma = i915_vma_instance(obj, vm, view);
-	if (IS_ERR(vma))
-		goto err;
+		vma = i915_vma_instance(obj, vm, view);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			continue;
+		}
 
-	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
-		ret = i915_vma_unbind_unlocked(vma);
-		if (ret) {
-			vma = ERR_PTR(ret);
-			goto err;
+		if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+			ret = i915_vma_unbind(vma);
+			if (ret)
+				continue;
 		}
-	}
 
-	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+		ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+		if (ret)
+			continue;
+	}
 	if (ret) {
 		vma = ERR_PTR(ret);
 		goto err;
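
The conversion above relies on the ww-lock retry pattern that for_i915_gem_ww() expresses: any step that fails with a would-deadlock error backs off and re-runs the whole locking sequence, so migration, cache-level setup and pinning always happen under one lock acquisition. A standalone sketch of that retry shape, using hypothetical helpers (try_lock_and_migrate, pin_in_lmem) rather than the i915 API:

    /* Illustrative model of the retry loop; not the i915 macro itself. */
    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* pretend the first locking attempt hits lock contention */
    static int try_lock_and_migrate(void)
    {
        return ++attempts < 2 ? -EDEADLK : 0;
    }

    static int pin_in_lmem(void)
    {
        int err;

        do {
            err = try_lock_and_migrate();
            if (err == -EDEADLK)
                continue;   /* back off and retry the whole sequence */
            if (err)
                break;      /* real failure: propagate */
            /* ...migrate, set cache level, pin... all under the lock */
        } while (err == -EDEADLK);

        return err;
    }

    int main(void)
    {
        printf("pin result %d after %d attempt(s)\n", pin_in_lmem(), attempts);
        return 0;
    }
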
commit 7023472834a39341460dae5c9b506c76c5940cad
Author: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Date:   Mon Oct 3 13:16:30 2022 +0100

    drm/i915/guc: Fix revocation of non-persistent contexts
    
    The patch which added a graceful exit for non-persistent contexts missed
    the fact that it is not enough to set the exiting flag on a context and
    let the backend handle it from there.
    
    The GuC backend cannot handle it because it runs independently in the
    firmware and the driver might never see the requests again. The patch
    also missed that some usages of intel_context_is_banned in the GuC
    backend needed replacing with the newly introduced
    intel_context_is_schedulable.
    
    Fix the first issue by calling into the backend revoke when we know this
    is the last chance to do it. Fix the second issue by replacing
    intel_context_is_banned with intel_context_is_schedulable, which should
    always be safe since the latter is a superset of the former.
    
    v2:
     * Just call ce->ops->revoke unconditionally. (Andrzej)
    
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
    Fixes: 45c64ecf97ee ("drm/i915: Improve user experience and driver robustness under SIGINT or similar")
    Cc: Andrzej Hajda <andrzej.hajda at intel.com>
    Cc: John Harrison <John.C.Harrison at Intel.com>
    Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
    Cc: <stable at vger.kernel.org> # v6.0+
    Reviewed-by: Andrzej Hajda <andrzej.hajda at intel.com>
    Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221003121630.694249-1-tvrtko.ursulin@linux.intel.com
    (cherry picked from commit 0add082cebac8555ee3972ba768ae5c01db7a498)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dabdfe09f5e5..e7148a994b3a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1383,14 +1383,8 @@ kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
 	 */
 	for_each_gem_engine(ce, engines, it) {
 		struct intel_engine_cs *engine;
-		bool skip = false;
 
-		if (exit)
-			skip = intel_context_set_exiting(ce);
-		else if (!persistent)
-			skip = intel_context_exit_nonpersistent(ce, NULL);
-
-		if (skip)
+		if ((exit || !persistent) && intel_context_revoke(ce))
 			continue; /* Already marked. */
 
 		/*
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 654a092ed3d6..e94365b08f1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -614,13 +614,12 @@ bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
 	return ret;
 }
 
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
-				      struct i915_request *rq)
+bool intel_context_revoke(struct intel_context *ce)
 {
 	bool ret = intel_context_set_exiting(ce);
 
 	if (ce->ops->revoke)
-		ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+		ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 8e2d70630c49..be09fb2e883a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -329,8 +329,7 @@ static inline bool intel_context_set_exiting(struct intel_context *ce)
 	return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
 }
 
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
-				      struct i915_request *rq);
+bool intel_context_revoke(struct intel_context *ce);
 
 static inline bool
 intel_context_force_single_submission(const struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 22ba66e48a9b..1db59eeb34db 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -684,7 +684,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 	 * Corner case where requests were sitting in the priority list or a
 	 * request resubmitted after the context was banned.
 	 */
-	if (unlikely(intel_context_is_banned(ce))) {
+	if (unlikely(!intel_context_is_schedulable(ce))) {
 		i915_request_put(i915_request_mark_eio(rq));
 		intel_engine_signal_breadcrumbs(ce->engine);
 		return 0;
@@ -870,15 +870,15 @@ static int guc_wq_item_append(struct intel_guc *guc,
 			      struct i915_request *rq)
 {
 	struct intel_context *ce = request_to_scheduling_context(rq);
-	int ret = 0;
+	int ret;
 
-	if (likely(!intel_context_is_banned(ce))) {
-		ret = __guc_wq_item_append(rq);
+	if (unlikely(!intel_context_is_schedulable(ce)))
+		return 0;
 
-		if (unlikely(ret == -EBUSY)) {
-			guc->stalled_request = rq;
-			guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
-		}
+	ret = __guc_wq_item_append(rq);
+	if (unlikely(ret == -EBUSY)) {
+		guc->stalled_request = rq;
+		guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
 	}
 
 	return ret;
@@ -897,7 +897,7 @@ static bool multi_lrc_submit(struct i915_request *rq)
 	 * submitting all the requests generated in parallel.
 	 */
 	return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
-		intel_context_is_banned(ce);
+	       !intel_context_is_schedulable(ce);
 }
 
 static int guc_dequeue_one_context(struct intel_guc *guc)
@@ -966,7 +966,7 @@ register_context:
 		struct intel_context *ce = request_to_scheduling_context(last);
 
 		if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
-			     !intel_context_is_banned(ce))) {
+			     intel_context_is_schedulable(ce))) {
 			ret = try_context_registration(ce, false);
 			if (unlikely(ret == -EPIPE)) {
 				goto deadlk;
@@ -1576,7 +1576,7 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
 {
 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
 
-	if (intel_context_is_banned(ce))
+	if (!intel_context_is_schedulable(ce))
 		return;
 
 	GEM_BUG_ON(!intel_context_is_pinned(ce));
@@ -4424,12 +4424,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
 {
 	trace_intel_context_reset(ce);
 
-	if (likely(!intel_context_is_banned(ce))) {
+	if (likely(intel_context_is_schedulable(ce))) {
 		capture_error_state(guc, ce);
 		guc_context_replay(ce);
 	} else {
 		drm_info(&guc_to_gt(guc)->i915->drm,
-			 "Ignoring context reset notification of banned context 0x%04X on %s",
+			 "Ignoring context reset notification of exiting context 0x%04X on %s",
 			 ce->guc_id.id, ce->engine->name);
 	}
 }
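
The substitution above is safe because a context stops being schedulable when it is either banned or marked exiting, so the new check covers a strict superset of the cases the old banned check caught. A small standalone model of that relationship, with illustrative flag names rather than the real i915 context flags:

    /* Illustrative flags; not the real i915 context flag bits. */
    #include <stdbool.h>
    #include <stdio.h>

    #define CTX_BANNED  (1u << 0)
    #define CTX_EXITING (1u << 1)

    static bool is_banned(unsigned int flags)
    {
        return flags & CTX_BANNED;
    }

    static bool is_schedulable(unsigned int flags)
    {
        return !(flags & (CTX_BANNED | CTX_EXITING));
    }

    int main(void)
    {
        unsigned int cases[] = { 0, CTX_BANNED, CTX_EXITING,
                                 CTX_BANNED | CTX_EXITING };

        for (unsigned int i = 0; i < 4; i++)
            printf("flags=%u banned=%d !schedulable=%d\n",
                   cases[i], is_banned(cases[i]), !is_schedulable(cases[i]));
        return 0;
    }
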
commit 17d819e2828cacca2e4c909044eb9798ed379cd2
Author: Hamza Mahfooz <hamza.mahfooz at amd.com>
Date:   Wed Oct 5 11:30:38 2022 -0400

    Revert "drm/amdgpu: use dirty framebuffer helper"
    
    This reverts commit 66f99628eb24409cb8feb5061f78283c8b65f820.
    
    Unfortunately, that commit causes performance regressions on non-PSR
    setups. So, just revert it until FB_DAMAGE_CLIPS support can be added.
    
    Cc: stable at vger.kernel.org
    Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2189
    Link: https://bugzilla.kernel.org/show_bug.cgi?id=216554
    Fixes: 66f99628eb2440 ("drm/amdgpu: use dirty framebuffer helper")
    Fixes: abbc7a3dafb91b ("drm/amdgpu: don't register a dirty callback for non-atomic")
    Signed-off-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Acked-by: Alex Deucher <alexander.deucher at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 23998f727c7f..1a06b8d724f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,8 +38,6 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -500,12 +498,6 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 };
 
-static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
-	.destroy = drm_gem_fb_destroy,
-	.create_handle = drm_gem_fb_create_handle,
-	.dirty = drm_atomic_helper_dirtyfb,
-};
-
 uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 					  uint64_t bo_flags)
 {
@@ -1108,10 +1100,8 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	if (drm_drv_uses_atomic_modeset(dev))
-		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
-	else
-		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
 	if (ret)
 		goto err;
 
commit 2302d507149f0ae7cc697089ab5675a2d4cf9d2a
Author: Philip Yang <Philip.Yang at amd.com>
Date:   Mon Oct 3 17:53:25 2022 -0400

    drm/amdgpu: Correct amdgpu_amdkfd_total_mem_size calculation
    
    amdkfd_total_mem_size is the total VRAM of all GPUs plus system memory;
    it is used to estimate page table memory usage and leave enough VRAM
    room for page table allocation.
    
    Calculating amdkfd_total_mem_size in amdgpu_amdkfd_device_probe is
    incorrect because adev->gmc.real_vram_size is still 0 when it is called
    from amdgpu_device_ip_early_init. Move the calculation to
    amdgpu_amdkfd_device_init to get the correct VRAM size.
    
    Do the reverse calculation in amdgpu_amdkfd_device_fini_sw to support
    hot-unplugging GPUs.
    
    Signed-off-by: Philip Yang <Philip.Yang at amd.com>
    Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 9e98f3866edc..03bbfaa51cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -75,9 +75,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 		return;
 
 	adev->kfd.dev = kgd2kfd_probe(adev, vf);
-
-	if (adev->kfd.dev)
-		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
 }
 
 /**
@@ -201,6 +198,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
 						adev_to_drm(adev), &gpu_resources);
 
+		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
 		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
 	}
 }
@@ -210,6 +209,7 @@ void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
 	if (adev->kfd.dev) {
 		kgd2kfd_device_exit(adev->kfd.dev);
 		adev->kfd.dev = NULL;
+		amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
 	}
 }
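
The accounting change above follows a simple pattern: add a device's contribution to the global estimate only once its real VRAM size is known at init time, and subtract the same amount at teardown so hot-unplug keeps the total consistent. A standalone sketch with made-up names (device_init, device_fini, total_mem_size):

    /* Illustrative sketch; names and sizes are made up. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t total_mem_size;

    struct gpu { uint64_t real_vram_size; };

    static void device_init(struct gpu *g)
    {
        g->real_vram_size = 8ull << 30;     /* known only at init time */
        total_mem_size += g->real_vram_size;
    }

    static void device_fini(struct gpu *g)
    {
        total_mem_size -= g->real_vram_size;    /* reverse on hot-unplug */
    }

    int main(void)
    {
        struct gpu g = { 0 };

        device_init(&g);
        printf("after init: %llu GiB\n",
               (unsigned long long)(total_mem_size >> 30));
        device_fini(&g);
        printf("after fini: %llu GiB\n",
               (unsigned long long)(total_mem_size >> 30));
        return 0;
    }
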
 
commit 9a3c6067bd2ee2ca2652fbb0679f422f3c9109f9
Author: Philip Yang <Philip.Yang at amd.com>
Date:   Mon Oct 3 13:03:26 2022 -0400

    drm/amdgpu: Set vmbo destroy after pt bo is created
    
    Under VRAM usage pressure, mapping to the GPU may fail to create the pt
    bo, leaving vmbo->shadow_list uninitialized; ttm_bo_release then calls
    amdgpu_bo_vm_destroy, which accesses vmbo->shadow_list and generates the
    dmesg output and NULL pointer access backtrace below:
    
    Set the vmbo destroy callback to amdgpu_bo_vm_destroy only after the pt
    bo is created successfully, otherwise use the default callback
    amdgpu_bo_destroy.
    
    amdgpu: amdgpu_vm_bo_update failed
    amdgpu: update_gpuvm_pte() failed
    amdgpu: Failed to map bo to gpuvm
    amdgpu 0000:43:00.0: amdgpu: Failed to map peer:0000:43:00.0 mem_domain:2
    BUG: kernel NULL pointer dereference, address:
     RIP: 0010:amdgpu_bo_vm_destroy+0x4d/0x80 [amdgpu]
     Call Trace:
      <TASK>
      ttm_bo_release+0x207/0x320 [amdttm]
      amdttm_bo_init_reserved+0x1d6/0x210 [amdttm]
      amdgpu_bo_create+0x1ba/0x520 [amdgpu]
      amdgpu_bo_create_vm+0x3a/0x80 [amdgpu]
      amdgpu_vm_pt_create+0xde/0x270 [amdgpu]
      amdgpu_vm_ptes_update+0x63b/0x710 [amdgpu]
      amdgpu_vm_update_range+0x2e7/0x6e0 [amdgpu]
      amdgpu_vm_bo_update+0x2bd/0x600 [amdgpu]
      update_gpuvm_pte+0x160/0x420 [amdgpu]
      amdgpu_amdkfd_gpuvm_map_memory_to_gpu+0x313/0x1130 [amdgpu]
      kfd_ioctl_map_memory_to_gpu+0x115/0x390 [amdgpu]
      kfd_ioctl+0x24a/0x5b0 [amdgpu]
    
    Signed-off-by: Philip Yang <Philip.Yang at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6a9b9fc9e0b..2e8f6cd7a729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -688,13 +688,16 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
 	 * num of amdgpu_vm_pt entries.
 	 */
 	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
-	bp->destroy = &amdgpu_bo_vm_destroy;
 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 	if (r)
 		return r;
 
 	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
 	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+	/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+	 * is initialized.
+	 */
+	bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
 	return r;
 }
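
The ordering rule behind the fix above is general: a destroy callback that walks a list may only be installed after that list has been initialized, otherwise an error path can invoke it on uninitialized memory. A standalone sketch of that rule with made-up types (struct obj, init_list, destroy_with_list):

    /* Illustrative sketch; types and callbacks are made up. */
    #include <stdio.h>

    struct node { struct node *next; };

    struct obj {
        struct node list;       /* valid only after init_list() */
        void (*destroy)(struct obj *);
    };

    static void init_list(struct obj *o)
    {
        o->list.next = &o->list;    /* empty circular list */
    }

    static void destroy_with_list(struct obj *o)
    {
        /* safe only because init_list() has already run */
        printf("list empty: %d\n", o->list.next == &o->list);
    }

    int main(void)
    {
        struct obj o = { .list = { .next = NULL } };

        /* 1. initialize the state the callback depends on */
        init_list(&o);
        /* 2. only now point the destroy hook at the list-walking variant */
        o.destroy = destroy_with_list;

        o.destroy(&o);
        return 0;
    }
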
 
commit 312b4dc11d4f74bfe03ea25ffe04c1f2fdd13cb9
Author: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
Date:   Tue Oct 4 07:33:39 2022 -0700

    drm/amdgpu: Fix VRAM BO swap issue
    
    The DRM buddy manager satisfies contiguous memory requests with either
    a single block or multiple blocks. So for the ttm move operation (in
    case of low VRAM memory) we should walk all the blocks to compute the
    total memory size, which is compared with the struct ttm_resource
    num_pages in order to verify that the blocks are contiguous for the
    eviction process.
    
    v2: Added a Fixes tag
    v3: Rewrite the code to save a bit of calculations and
        variables (Christian)
    
    Fixes: c9cad937c0c5 ("drm/amdgpu: add drm buddy support to amdgpu")
    Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b1c455329023..dc262d2c2925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -424,8 +424,9 @@ error:
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 			       struct ttm_resource *mem)
 {
-	uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+	u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
 	struct amdgpu_res_cursor cursor;
+	u64 end;
 
 	if (mem->mem_type == TTM_PL_SYSTEM ||
 	    mem->mem_type == TTM_PL_TT)
@@ -434,12 +435,18 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 		return false;
 
 	amdgpu_res_first(mem, 0, mem_size, &cursor);
+	end = cursor.start + cursor.size;
+	while (cursor.remaining) {
+		amdgpu_res_next(&cursor, cursor.size);
 
-	/* ttm_resource_ioremap only supports contiguous memory */
-	if (cursor.size != mem_size)
-		return false;
+		/* ttm_resource_ioremap only supports contiguous memory */
+		if (end != cursor.start)
+			return false;
+
+		end = cursor.start + cursor.size;
+	}
 
-	return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
+	return end <= adev->gmc.visible_vram_size;
 }
 
 /*
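
The new check above walks every buddy block of the allocation and verifies that each block starts exactly where the previous one ended, then compares the final end against the CPU-visible limit. A standalone sketch of that walk over made-up block data (not the amdgpu cursor API):

    /* Illustrative sketch; block data and limits are made up. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct block { uint64_t start, size; };

    static bool contiguous_and_visible(const struct block *blk, int count,
                                       uint64_t visible_size)
    {
        uint64_t end = blk[0].start + blk[0].size;

        for (int i = 1; i < count; i++) {
            /* any gap means ioremap of the whole range is impossible */
            if (blk[i].start != end)
                return false;
            end = blk[i].start + blk[i].size;
        }
        return end <= visible_size;
    }

    int main(void)
    {
        struct block contiguous[] = { { 0x0000, 0x1000 }, { 0x1000, 0x1000 } };
        struct block gap[]        = { { 0x0000, 0x1000 }, { 0x3000, 0x1000 } };

        printf("contiguous: %d\n", contiguous_and_visible(contiguous, 2, 0x4000));
        printf("with gap:   %d\n", contiguous_and_visible(gap, 2, 0x8000));
        return 0;
    }
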
commit 7e4ab9fb2b9449ef01977e79157d06c8900f73fd
Author: Yuan Can <yuancan at huawei.com>
Date:   Tue Sep 27 13:39:08 2022 +0000

    drm/amd/display: Remove unused struct i2c_id_config_access
    
    After commit 5a8132b9f606 ("drm/amd/display: remove dead dc vbios code"),
    no one uses struct i2c_id_config_access, so remove it.
    
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Yuan Can <yuancan at huawei.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 53b077b40d72..ee0456b5e14e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -51,13 +51,6 @@
 #define LAST_RECORD_TYPE 0xff
 #define SMU9_SYSPLL0_ID  0
 
-struct i2c_id_config_access {
-	uint8_t bfI2C_LineMux:4;
-	uint8_t bfHW_EngineID:3;
-	uint8_t bfHW_Capable:1;
-	uint8_t ucAccess;
-};
-
 static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
 	struct atom_i2c_record *record,
 	struct graphics_object_i2c_info *info);
commit 5e69732d4a89928b7daaa651ad869cebee28bfff
Author: Dong Chenchen <dongchenchen2 at huawei.com>
Date:   Fri Sep 30 14:38:27 2022 +0800

    drm/amd/display: Removed unused variable 'sdp_stream_enable'
    
    The kernel test robot reports the warning below:
    
    drivers/gpu/drm/amd/amdgpu/../display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c:
    In function 'dcn31_hpo_dp_stream_enc_update_dp_info_packets':
       drivers/gpu/drm/amd/amdgpu/../display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c:439:14:
    warning: variable 'sdp_stream_enable' set but not used
    [-Wunused-but-set-variable]
       439 |         bool sdp_stream_enable = false;
    
    Removed unused variable 'sdp_stream_enable'.
    
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Reported-by: kernel test robot <lkp at intel.com>
    Signed-off-by: Dong Chenchen <dongchenchen2 at huawei.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index d71d89268a07..814f401db3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -436,32 +436,28 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
 {
 	struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
 	uint32_t dmdata_packet_enabled = 0;
-	bool sdp_stream_enable = false;
 
-	if (info_frame->vsc.valid) {
+	if (info_frame->vsc.valid)
 		enc->vpg->funcs->update_generic_info_packet(
 				enc->vpg,
 				0,  /* packetIndex */
 				&info_frame->vsc,
 				true);
-		sdp_stream_enable = true;
-	}
-	if (info_frame->spd.valid) {
+
+	if (info_frame->spd.valid)
 		enc->vpg->funcs->update_generic_info_packet(
 				enc->vpg,
 				2,  /* packetIndex */
 				&info_frame->spd,
 				true);
-		sdp_stream_enable = true;
-	}
-	if (info_frame->hdrsmd.valid) {
+
+	if (info_frame->hdrsmd.valid)
 		enc->vpg->funcs->update_generic_info_packet(
 				enc->vpg,
 				3,  /* packetIndex */
 				&info_frame->hdrsmd,
 				true);
-		sdp_stream_enable = true;
-	}
+
 	/* enable/disable transmission of packet(s).
 	 * If enabled, packet transmission begins on the next frame
 	 */
commit 7d30ccc7761cfcd6756aa0b760c5f5493038d30a
Author: Randy Dunlap <rdunlap at infradead.org>
Date:   Fri Sep 30 21:33:54 2022 -0700

    drm/amd/display: clean up dcn32_fpu.c kernel-doc
    
    Rectify multiple kernel-doc warnings in dcn32_fpu.c.
    E.g.:
    
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:247: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
        * Finds dummy_latency_index when MCLK switching using firmware based
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:484: warning: Function parameter or member 'phantom_stream' not described in 'dcn32_set_phantom_stream_timing'
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:601: warning: Function parameter or member 'dc' not described in 'dcn32_assign_subvp_pipe'
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:601: warning: Function parameter or member 'context' not described in 'dcn32_assign_subvp_pipe'
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:601: warning: Function parameter or member 'index' not described in 'dcn32_assign_subvp_pipe'
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2140: warning: Function parameter or member 'dc' not described in 'dcn32_update_bw_bounding_box_fpu'
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2140: warning: Function parameter or member 'bw_params' not described in 'dcn32_update_bw_bounding_box_fpu'
    drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2140: warning: expecting prototype for dcn32_update_bw_bounding_box(). Prototype was for dcn32_update_bw_bounding_box_fpu() instead
    
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Reported-by: kernel test robot <lkp at intel.com>
    Signed-off-by: Randy Dunlap <rdunlap at infradead.org>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Cc: George Shen <george.shen at amd.com>
    Cc: Alvin Lee <alvin.lee2 at amd.com>
    Cc: Nevenko Stupar <Nevenko.Stupar at amd.com>
    Cc: Harry Wentland <harry.wentland at amd.com>
    Cc: Leo Li <sunpeng.li at amd.com>
    Cc: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Cc: amd-gfx at lists.freedesktop.org
    Cc: dri-devel at lists.freedesktop.org
    Cc: Alex Deucher <alexander.deucher at amd.com>
    Cc: Christian König <christian.koenig at amd.com>
    Cc: "Pan, Xinhui" <Xinhui.Pan at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index f3f98e9a0ce6..6bdd509d292a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -243,7 +243,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
 }
 
-/**
+/*
  * Finds dummy_latency_index when MCLK switching using firmware based
  * vblank stretch is enabled. This function will iterate through the
  * table of dummy pstate latencies until the lowest value that allows
@@ -290,15 +290,14 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
 /**
  * dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
  * and populate pipe_ctx with those params.
- *
- * This function must be called AFTER the phantom pipes are added to context
- * and run through DML (so that the DLG params for the phantom pipes can be
- * populated), and BEFORE we program the timing for the phantom pipes.
- *
  * @dc: [in] current dc state
  * @context: [in] new dc state
  * @pipes: [in] DML pipe params array
  * @pipe_cnt: [in] DML pipe count
+ *
+ * This function must be called AFTER the phantom pipes are added to context
+ * and run through DML (so that the DLG params for the phantom pipes can be
+ * populated), and BEFORE we program the timing for the phantom pipes.
  */
 void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
 					      struct dc_state *context,
@@ -331,8 +330,9 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
 }
 
 /**
- * *******************************************************************************************
- * dcn32_predict_pipe_split: Predict if pipe split will occur for a given DML pipe
+ * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
+ * @context: [in] New DC state to be programmed
+ * @pipe_e2e: [in] DML pipe end to end context
  *
  * This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
  * ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
@@ -343,12 +343,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
  * - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
  *   MPC is required
  *
- * @param [in]: context: New DC state to be programmed
- * @param [in]: pipe_e2e: DML pipe end to end context
- *
- * @return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
- *
- * *******************************************************************************************
+ * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
  */
 uint8_t dcn32_predict_pipe_split(struct dc_state *context,
 				  display_e2e_pipe_params_st *pipe_e2e)
@@ -504,7 +499,14 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
 }
 
 /**
- * dcn32_set_phantom_stream_timing: Set timing params for the phantom stream
+ * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
+ * @dc: current dc state
+ * @context: new dc state
+ * @ref_pipe: Main pipe for the phantom stream
+ * @phantom_stream: target phantom stream state
+ * @pipes: DML pipe params
+ * @pipe_cnt: number of DML pipes
+ * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
  *
  * Set timing params of the phantom stream based on calculated output from DML.
  * This function first gets the DML pipe index using the DC pipe index, then
@@ -517,13 +519,6 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
  * that separately.
  *
  * - Set phantom backporch = vstartup of main pipe
- *
- * @dc: current dc state
- * @context: new dc state
- * @ref_pipe: Main pipe for the phantom stream
- * @pipes: DML pipe params
- * @pipe_cnt: number of DML pipes
- * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
  */
 void dcn32_set_phantom_stream_timing(struct dc *dc,
 				     struct dc_state *context,
@@ -592,16 +587,14 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
 }
 
 /**
- * dcn32_get_num_free_pipes: Calculate number of free pipes
+ * dcn32_get_num_free_pipes - Calculate number of free pipes
+ * @dc: current dc state
+ * @context: new dc state
  *
  * This function assumes that a "used" pipe is a pipe that has
  * both a stream and a plane assigned to it.
  *
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * Number of free pipes available in the context
+ * Return: Number of free pipes available in the context
  */
 static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
 {
@@ -625,7 +618,10 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
 }
 
 /**
- * dcn32_assign_subvp_pipe: Function to decide which pipe will use Sub-VP.
+ * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
+ * @dc: current dc state
+ * @context: new dc state
+ * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
  *
  * We enter this function if we are Sub-VP capable (i.e. enough pipes available)
  * and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
@@ -639,12 +635,7 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
  * for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
  * support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
  *
- * @param dc: current dc state
- * @param context: new dc state
- * @param index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
- *
- * Return:
- * True if a valid pipe assignment was found for Sub-VP. Otherwise false.
+ * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
  */
 static bool dcn32_assign_subvp_pipe(struct dc *dc,
 				    struct dc_state *context,
@@ -711,7 +702,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
 }
 
 /**
- * dcn32_enough_pipes_for_subvp: Function to check if there are "enough" pipes for SubVP.
+ * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
+ * @dc: current dc state
+ * @context: new dc state
  *
  * This function returns true if there are enough free pipes
  * to create the required phantom pipes for any given stream
@@ -723,9 +716,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
  * pipe which can be used as the phantom pipe for the non pipe
  * split pipe.
  *
- * @dc: current dc state
- * @context: new dc state
- *
  * Return:
  * True if there are enough free pipes to assign phantom pipes to at least one
  * stream that does not already have phantom pipes assigned. Otherwise false.
@@ -764,7 +754,9 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
 }
 
 /**
- * subvp_subvp_schedulable: Determine if SubVP + SubVP config is schedulable
+ * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
  *
  * High level algorithm:
  * 1. Find longest microschedule length (in us) between the two SubVP pipes
@@ -772,11 +764,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
  * pipes still allows for the maximum microschedule to fit in the active
  * region for both pipes.
  *
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + SubVP config is schedulable, false otherwise
+ * Return: True if the SubVP + SubVP config is schedulable, false otherwise
  */
 static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
 {
@@ -836,7 +824,10 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
 }
 
 /**
- * subvp_drr_schedulable: Determine if SubVP + DRR config is schedulable
+ * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
+ * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
  *
  * High level algorithm:
  * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
@@ -845,12 +836,7 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
  * 3.If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
  * then report the configuration as supported
  *
- * @dc: current dc state
- * @context: new dc state
- * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
- *
- * Return:
- * bool - True if the SubVP + DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + DRR config is schedulable, false otherwise
  */
 static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
 {
@@ -914,7 +900,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
 
 
 /**
- * subvp_vblank_schedulable: Determine if SubVP + VBLANK config is schedulable
+ * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
  *
  * High level algorithm:
  * 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
@@ -922,11 +910,7 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
  * then report the configuration as supported
  * 3. If the VBLANK display is DRR, then take the DRR static schedulability path
  *
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
  */
 static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
 {
@@ -1003,20 +987,18 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
 }
 
 /**
- * subvp_validate_static_schedulability: Check which SubVP case is calculated and handle
- * static analysis based on the case.
+ * subvp_validate_static_schedulability - Check which SubVP case is calculated
+ * and handle static analysis based on the case.
+ * @dc: current dc state
+ * @context: new dc state
+ * @vlevel: Voltage level calculated by DML
  *
  * Three cases:
  * 1. SubVP + SubVP
  * 2. SubVP + VBLANK (DRR checked internally)
  * 3. SubVP + VACTIVE (currently unsupported)
  *
- * @dc: current dc state
- * @context: new dc state
- * @vlevel: Voltage level calculated by DML
- *
- * Return:
- * bool - True if statically schedulable, false otherwise
+ * Return: True if statically schedulable, false otherwise
  */
 static bool subvp_validate_static_schedulability(struct dc *dc,
 				struct dc_state *context,
@@ -2281,7 +2263,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
 	return 0;
 }
 
-/**
+/*
  * dcn32_update_bw_bounding_box
  *
  * This would override some dcn3_2 ip_or_soc initial parameters hardcoded from
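
The cleanups above all converge on the same kernel-doc shape: the comment names the function with a short one-line description after a dash, lists every parameter as "@name:" immediately after, and ends with a single "Return:" line. A minimal compilable example of that shape, using a trivial stand-in function (count_free_pipes) rather than the DCN32 code:

    #include <stdio.h>

    /**
     * count_free_pipes - Count pipes with neither a stream nor a plane
     * @used: number of pipes currently carrying both a stream and a plane
     * @total: total number of pipes in the pool
     *
     * Return: Number of free pipes available.
     */
    static unsigned int count_free_pipes(unsigned int used, unsigned int total)
    {
        /* stand-in function; the kernel-doc layout above is the point */
        return total - used;
    }

    int main(void)
    {
        printf("%u free pipes\n", count_free_pipes(2, 6));
        return 0;
    }
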
commit 8799c0be89ebb99a16098bdf618f49f817bef76a
Author: Yunxiang Li <Yunxiang.Li at amd.com>
Date:   Wed Sep 21 17:20:19 2022 -0400

    drm/amd/display: Fix vblank refcount in vrr transition
    
    manage_dm_interrupts disables/enables vblank using drm_crtc_vblank_off/on,
    which causes drm_crtc_vblank_get in vrr_transition to fail; later, when
    drm_crtc_vblank_put is called, the vblank refcount ends up unbalanced.
    Therefore move the call to after manage_dm_interrupts.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1247
    Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1380
    
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Yunxiang Li <Yunxiang.Li at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d5222d5e3a61..b84aedb707b8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7479,15 +7479,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
 		 * We also need vupdate irq for the actual core vblank handling
 		 * at end of vblank.
 		 */
-		dm_set_vupdate_irq(new_state->base.crtc, true);
-		drm_crtc_vblank_get(new_state->base.crtc);
+		WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+		WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
 				 __func__, new_state->base.crtc->base.id);
 	} else if (old_vrr_active && !new_vrr_active) {
 		/* Transition VRR active -> inactive:
 		 * Allow vblank irq disable again for fixed refresh rate.
 		 */
-		dm_set_vupdate_irq(new_state->base.crtc, false);
+		WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
 		drm_crtc_vblank_put(new_state->base.crtc);
 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
 				 __func__, new_state->base.crtc->base.id);
@@ -8243,23 +8243,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		mutex_unlock(&dm->dc_lock);
 	}
 
-	/* Count number of newly disabled CRTCs for dropping PM refs later. */
-	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
-				      new_crtc_state, i) {
-		if (old_crtc_state->active && !new_crtc_state->active)
-			crtc_disable_count++;
-
-		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
-		/* For freesync config update on crtc state and params for irq */
-		update_stream_irq_parameters(dm, dm_new_crtc_state);
-
-		/* Handle vrr on->off / off->on transitions */
-		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
-						dm_new_crtc_state);
-	}
-
 	/**
 	 * Enable interrupts for CRTCs that are newly enabled or went through
 	 * a modeset. It was intentionally deferred until after the front end
@@ -8269,16 +8252,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 #ifdef CONFIG_DEBUG_FS
-		bool configure_crc = false;
 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+		struct crc_rd_work *crc_rd_wrk;
+#endif
+#endif
+		/* Count number of newly disabled CRTCs for dropping PM refs later. */
+		if (old_crtc_state->active && !new_crtc_state->active)
+			crtc_disable_count++;
+
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+		/* For freesync config update on crtc state and params for irq */
+		update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+		crc_rd_wrk = dm->crc_rd_wrk;
 #endif
 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 		cur_crc_src = acrtc->dm_irq_params.crc_src;
 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 #endif
-		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
 		if (new_crtc_state->active &&
 		    (!old_crtc_state->active ||
@@ -8286,16 +8282,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			dc_stream_retain(dm_new_crtc_state->stream);
 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
 			manage_dm_interrupts(adev, acrtc, true);
+		}
+		/* Handle vrr on->off / off->on transitions */
+		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
 
 #ifdef CONFIG_DEBUG_FS
+		if (new_crtc_state->active &&
+		    (!old_crtc_state->active ||
+		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
 			/**
 			 * Frontend may have changed so reapply the CRC capture
 			 * settings for the stream.
 			 */
-			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
-				configure_crc = true;
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
@@ -8307,12 +8306,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 				}
 #endif
-			}
-
-			if (configure_crc)
 				if (amdgpu_dm_crtc_configure_crc_source(
 					crtc, dm_new_crtc_state, cur_crc_src))
 					DRM_DEBUG_DRIVER("Failed to configure crc source");
+			}
 #endif
 		}
 	}
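
The refcount hazard fixed above is easy to model: a vblank get attempted while the counter is disabled fails, and the later unconditional put then unbalances the count, so the get has to happen after interrupts are enabled. A standalone sketch (not the DRM API; vblank_get/vblank_put here are stand-ins):

    /* Illustrative model; not the drm_crtc_vblank_* implementation. */
    #include <stdbool.h>
    #include <stdio.h>

    struct vblank { bool enabled; int refcount; };

    static int vblank_get(struct vblank *v)
    {
        if (!v->enabled)
            return -1;      /* mirrors the get failing while vblank is off */
        v->refcount++;
        return 0;
    }

    static void vblank_put(struct vblank *v)
    {
        v->refcount--;      /* caller assumes the earlier get succeeded */
    }

    int main(void)
    {
        struct vblank v = { .enabled = false, .refcount = 0 };

        vblank_get(&v);     /* too early: fails */
        v.enabled = true;   /* interrupts enabled afterwards */
        vblank_put(&v);     /* unpaired put */
        printf("refcount after early get: %d (expected 0)\n", v.refcount);

        v.refcount = 0;
        vblank_get(&v);     /* enable first, then the get succeeds */
        vblank_put(&v);
        printf("refcount with correct order: %d\n", v.refcount);
        return 0;
    }
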
commit f6aa84b83aee629fbbbc4ea16c2c142caf920d5a
Author: Roman Li <roman.li at amd.com>
Date:   Thu Sep 29 14:37:00 2022 -0400

    drm/amd/display: Enable dpia support for dcn314
    
    [Why]
    DCN 3.1.4 supports DPIA.
    
    [How]
     - Set dpia_supported flag for dcn314 in dmub_hw_init()
     - Remove comment that becomes irrelevant after this change.
    
    Signed-off-by: Roman Li <roman.li at amd.com>
    Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas at amd.com>
    Reviewed-by: Mario Limonciello <mario.limonciello at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4c73727e0b7d..d5222d5e3a61 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1110,7 +1110,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 		hw_params.fb[i] = &fb_info->fb[i];
 
 	switch (adev->ip_versions[DCE_HWIP][0]) {
-	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+	case IP_VERSION(3, 1, 3):
+	case IP_VERSION(3, 1, 4):
 		hw_params.dpia_supported = true;
 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
 		break;
commit 21a550de5faf9f54013334c9a6a7643b8fd80b36
Author: Ruili Ji <ruiliji2 at amd.com>
Date:   Mon Oct 3 17:39:45 2022 +0800

    drm/amdgpu: Enable F32_WPTR_POLL_ENABLE in mqd
    
    This patch fixes the missing SDMA user queue doorbell issue on
    SDMA 6.0. F32_WPTR_POLL_ENABLE has to be set if doorbell mode is
    used; otherwise ringing the SDMA user queue doorbell can't wake the
    system up from gfxoff.
    
    Signed-off-by: Ruili Ji <ruiliji2 at amd.com>
    Reviewed-by: Yifan Zhang <yifan1.zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index db51230163c5..0150f66a5ae6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -846,7 +846,8 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
 	m->sdmax_rlcx_rb_cntl =
 		order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
 		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
-		4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+		4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+		1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
 
 	m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
 	m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 26b53b6d673e..4f6390f3236e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -333,7 +333,8 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
 		<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
 		q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
 		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
-		6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+		6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+		1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
 
 	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
 	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
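
Both hunks above assemble the RB_CNTL value by shifting each field into place and OR-ing the results, and the fix simply ORs in one more single-bit field. A standalone sketch of that construction; the shift values below are illustrative, not the real SDMA 6.0 register layout:

    /* Illustrative sketch; field shifts are made up, not the hardware's. */
    #include <stdint.h>
    #include <stdio.h>

    #define RB_SIZE_SHIFT               1
    #define RPTR_WRITEBACK_ENABLE_SHIFT 12
    #define RPTR_WRITEBACK_TIMER_SHIFT  16
    #define F32_WPTR_POLL_ENABLE_SHIFT  23

    static uint32_t order_base_2(uint32_t x)
    {
        uint32_t order = 0;

        while ((1u << order) < x)
            order++;
        return order;
    }

    int main(void)
    {
        uint32_t queue_size = 4096;
        uint32_t rb_cntl =
            order_base_2(queue_size / 4) << RB_SIZE_SHIFT |
            1u << RPTR_WRITEBACK_ENABLE_SHIFT |
            4u << RPTR_WRITEBACK_TIMER_SHIFT |
            1u << F32_WPTR_POLL_ENABLE_SHIFT;   /* the added bit */

        printf("rb_cntl = 0x%08x\n", rb_cntl);
        return 0;
    }
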
commit 525530ad9a7ec9aa34266e1429cc5ef9acb58e6c
Author: Yang Yingliang <yangyingliang at huawei.com>
Date:   Thu Sep 29 22:20:15 2022 +0800

    drm/amdgpu/sdma: add missing release_firmware() in amdgpu_sdma_init_microcode()
    
    In some error paths in amdgpu_sdma_init_microcode(), release_firmware()
    is not called and the memory allocated in request_firmware() is leaked.
    Call amdgpu_sdma_destroy_inst_ctx(), which calls release_firmware(), to
    avoid the memory leak.
    
    Fixes: 15aa13056d11da ("drm/amdgpu: add function to init SDMA microcode")
    Signed-off-by: Yang Yingliang <yangyingliang at huawei.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 3949b7e3907f..43cf8632cc1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -222,8 +222,10 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 		adev->sdma.instance[instance].fw->data;
 	version_major = le16_to_cpu(header->header_version_major);
 
-	if ((duplicate && instance) || (!duplicate && version_major > 1))
-		return -EINVAL;
+	if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
 	if (err)
@@ -272,7 +274,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 				ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
 			break;
 		default:
-			return -EINVAL;
+			err = -EINVAL;
 		}
 	}
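
The fix above applies the usual goto-cleanup rule: once a resource has been acquired, every failure must funnel through a single cleanup label instead of returning directly, or the resource leaks. A standalone sketch with acquire()/release() standing in for request_firmware()/release_firmware():

    /* Illustrative sketch; acquire/release stand in for the firmware API. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *acquire(void)   { return malloc(64); }
    static void release(void *p) { free(p); }

    static int init_thing(int bad_header)
    {
        int err = 0;
        void *fw = acquire();

        if (!fw)
            return -ENOMEM;     /* nothing to clean up yet */

        if (bad_header) {
            err = -EINVAL;
            goto out;           /* was: return -EINVAL (leaked fw) */
        }

        /* ...use fw... */
    out:
        release(fw);
        return err;
    }

    int main(void)
    {
        printf("bad header -> %d, ok -> %d\n", init_thing(1), init_thing(0));
        return 0;
    }
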
 
commit 8c39634d28fa460869702b9801d2efe06671b342
Author: Yang Li <yang.lee at linux.alibaba.com>
Date:   Fri Sep 30 13:38:59 2022 +0800

    drm/amd/display: clean up one inconsistent indenting
    
    clean up one inconsistent indenting
    
    Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=2321
    Reported-by: Abaci Robot <abaci at linux.alibaba.com>
    Signed-off-by: Yang Li <yang.lee at linux.alibaba.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 559e563d5bc1..f04595b750ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -852,7 +852,7 @@ static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
 		vmid->masks = &vmid_masks;
 	}
 
-	 hubbub3->num_vmid = res_cap_dcn301.num_vmid;
+	hubbub3->num_vmid = res_cap_dcn301.num_vmid;
 
 	return &hubbub3->base;
 }
commit 95c985ffc63e2a7d8f6aa18f9351f5010a8d1adb
Author: Yang Li <yang.lee at linux.alibaba.com>
Date:   Fri Sep 30 13:38:58 2022 +0800

    drm/amd/display: clean up one inconsistent indenting
    
    clean up one inconsistent indenting
    
    Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=2238
    Reported-by: Abaci Robot <abaci at linux.alibaba.com>
    Signed-off-by: Yang Li <yang.lee at linux.alibaba.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 6658849d5b4e..61087f2385a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -1654,7 +1654,7 @@ static bool dcn321_resource_construct(
 
 #undef REG_STRUCT
 #define REG_STRUCT dccg_regs
-		dccg_regs_init();
+	dccg_regs_init();
 
 
 	ctx->dc_bios->regs = &bios_regs;
commit 7e6d5cf8e3e3f8050de52a28236d5a172caf2da9
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Thu Sep 22 10:08:11 2022 -0400

    drm/amd/display: Minor code style change
    
    This commit makes some minor code style changes just to reduce the merge
    conflicts we hit when we upstream some of the VBA code.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 8316b1b914c6..11d5750e15af 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -2476,8 +2476,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]);
 		}
 
-		m = 0;
-
 		for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
 			for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
 				for (j = 0; j <= mode_lib->vba.NumberOfActiveSurfaces - 1; j++) {
@@ -2854,8 +2852,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 		}
 	}
 
-	m = 0;
-
 	//Calculate Return BW
 	for (i = 0; i < (int) v->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
@@ -3616,11 +3612,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 			mode_lib->vba.ModeIsSupported = mode_lib->vba.ModeSupport[i][0] == true
 					|| mode_lib->vba.ModeSupport[i][1] == true;
 
-			if (mode_lib->vba.ModeSupport[i][0] == true) {
+			if (mode_lib->vba.ModeSupport[i][0] == true)
 				MaximumMPCCombine = 0;
-			} else {
+			else
 				MaximumMPCCombine = 1;
-			}
 		}
 	}
 
commit a3daede47576037ff7bbbe9cbd36e52a71d92bc8
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Thu Sep 22 10:06:48 2022 -0400

    drm/amd/display: update DSC for DCN32
    
    Update DSC checks in the DCN32 VBA.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 75be1e1ce543..8316b1b914c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -2252,9 +2252,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
 		if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
 				|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
-				|| mode_lib->vba.DSCInputBitPerComponent[k] == 8.0
-				|| mode_lib->vba.DSCInputBitPerComponent[k] >
-				mode_lib->vba.MaximumDSCBitsPerComponent)) {
+				|| mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)
+				|| mode_lib->vba.DSCInputBitPerComponent[k] > mode_lib->vba.MaximumDSCBitsPerComponent) {
 			mode_lib->vba.NonsupportedDSCInputBPC = true;
 		}
 	}
@@ -2330,16 +2329,15 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
 					mode_lib->vba.BPPForMultistreamNotIndicated = true;
 				for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
-					if (mode_lib->vba.OutputMultistreamId[k] == j && mode_lib->vba.OutputMultistreamEn[k]
+					if (mode_lib->vba.OutputMultistreamId[k] == j
 						&& mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
 						mode_lib->vba.BPPForMultistreamNotIndicated = true;
 				}
 			}
 
 			if ((mode_lib->vba.Output[k] == dm_edp || mode_lib->vba.Output[k] == dm_hdmi)) {
-				if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.OutputMultistreamEn[k])
+				if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k)
 					mode_lib->vba.MultistreamWithHDMIOreDP = true;
-
 				for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
 					if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == j)
 						mode_lib->vba.MultistreamWithHDMIOreDP = true;
commit 07ebc18c047adcd72905619e72ae7c48db28ab48
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Thu Sep 22 09:24:12 2022 -0400

    drm/amd/display: Disconnect DSC for unused pipes during ODM transition
    
    [Why]
    During the transition from ODM combine to ODM bypass, if DSC is enabled
    we need to disconnect the DSC mux for pipes that are no longer in use.
    
    [How]
    During the ODM update, detect pipes with DSC that are no longer being
    used in the new state and call the new DSC interface to disconnect.
    
    Add a new DSC interface to disconnect from a pipe.
    
    Reviewed-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 33bdf56b2b3a..955ca273cfe1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -1148,16 +1148,19 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
 				true);
 	}
 
-	// Don't program pixel clock after link is already enabled
-/*	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
-			pipe_ctx->clock_source,
-			&pipe_ctx->stream_res.pix_clk_params,
-			&pipe_ctx->pll_settings)) {
-		BREAK_TO_DEBUGGER();
-	}*/
+	if (pipe_ctx->stream_res.dsc) {
+		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
 
-	if (pipe_ctx->stream_res.dsc)
 		update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
+
+		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
+		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
+				current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
+			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
+			/* disconnect DSC block from stream */
+			dsc->funcs->dsc_disconnect(dsc);
+		}
+	}
 }
 
 unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
commit 9691a7a776302c85c10294f1a92c15c7f57a5947
Author: Martin Leung <Martin.Leung at amd.com>
Date:   Mon May 23 14:57:30 2022 -0400

    drm/amd/display: unblock mcm_luts
    
    why and how:
    needed to fix a bad assumption when enabling mcm_luts
    
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Martin Leung <Martin.Leung at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 218927d6ecb2..33bdf56b2b3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -630,10 +630,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
 			params = &dpp_base->degamma_params;
 	}
 
-	result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+	dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
 
-	if (result &&
-			pipe_ctx->stream_res.opp &&
+	if (pipe_ctx->stream_res.opp &&
 			pipe_ctx->stream_res.opp->ctx &&
 			hws->funcs.set_mcm_luts)
 		result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state);
commit 36939c94689ae7e6aaa9a0fa37e5c41616f76665
Author: Aric Cyr <aric.cyr at amd.com>
Date:   Mon Sep 26 10:21:48 2022 -0400

    drm/amd/display: 3.2.206
    
    This version brings along the following:
    - ILR improvements
    - PSR fixes
    - DCN315 fixes
    - DCN32 fixes
    - ODM fixes
    - DSC fixes
    - SubVP fixes
    
    Reviewed-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Aric Cyr <aric.cyr at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 458a4f431ac6..66b7482d2e72 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.205"
+#define DC_VER "3.2.206"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
commit 54fae65ff469a79fc0ca46f480c4e7fce50f3963
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 20 16:06:36 2022 -0400

    drm/amd/display: Enable 2 to 1 ODM policy if supported
    
    If the current configuration supports 2 to 1 ODM policy, let's also
    enable the windowed MPO feature.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 8012a48859b5..218927d6ecb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -991,6 +991,10 @@ void dcn32_init_hw(struct dc *dc)
 		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
 		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
 	}
+
+	/* Enable support for ODM and windowed MPO if policy flag is set */
+	if (dc->debug.enable_single_display_2to1_odm_policy)
+		dc->config.enable_windowed_mpo_odm = true;
 }
 
 static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
commit 11895d32ffddb50152f0a1e671d36b7f60e4daba
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 20 15:46:58 2022 -0400

    drm/amd/display: Add PState change high hook for DCN32
    
    For some reason, we missed the PState check for DCN32, which may cause
    issues during clock transitions. This commit adds the required hook.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index f6d3da475835..9fbb72369c10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -936,6 +936,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
 	.program_watermarks = hubbub32_program_watermarks,
 	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
 	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
 	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
 	.force_pstate_change_control = hubbub3_force_pstate_change_control,
 	.init_watermarks = hubbub32_init_watermarks,
commit e626d9b9c6e038a6918aad1b5affd38f6b9deaed
Author: Sonny Jiang <sonny.jiang at amd.com>
Date:   Fri Sep 30 16:23:32 2022 -0400

    drm/amdgpu: Enable VCN PG on GC11_0_1
    
    Enable VCN PG on GC11_0_1
    
    Signed-off-by: Sonny Jiang <sonny.jiang at amd.com>
    Reviewed-by: James Zhu <James.Zhu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 16b757664a35..795706b3b092 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -629,6 +629,7 @@ static int soc21_common_early_init(void *handle)
 			AMD_CG_SUPPORT_JPEG_MGCG;
 		adev->pg_flags =
 			AMD_PG_SUPPORT_GFX_PG |
+			AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_VCN_DPG |
 			AMD_PG_SUPPORT_JPEG;
 		adev->external_rev_id = adev->rev_id + 0x1;
commit 46c87432e3d4cea8e1a7ac6e9e3ebd2462f47617
Author: Wenjing Liu <wenjing.liu at amd.com>
Date:   Thu Jun 23 16:09:25 2022 -0400

    drm/amd/display: Add missing mask sh for SYM32_TP_SQ_PULSE register
    
    A register mask is missing in dcn32, so the hardware programming is not
    executed when programming the SQ_num test pattern for DP2.
    
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Wenjing Liu <wenjing.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
index 9db1323e1933..176b1537d2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -47,6 +47,7 @@
 	SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
 	SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
 	SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+	SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
 	SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
 	SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
 	SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
commit f638fe27b817c755e017b8a6ae4b9b4224461941
Author: George Shen <George.Shen at amd.com>
Date:   Thu Jun 2 11:10:25 2022 -0400

    drm/amd/display: Add missing SDP registers to DCN32 reglist
    
    [Why]
    Certain features require the additional DP SDP configuration registers
    DP_SEC_CNTL1 and DP_SEC_CNTL5 in order to function correctly.
    
    The DCN32 DIO stream encoder reglist is currently missing these two
    registers.
    
    [How]
    Add the missing registers to the DCN32 DIO stream encoder reglist.
    
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: George Shen <George.Shen at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index e80dd2b92503..20e5f016a45a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -71,7 +71,9 @@
 	SRI(DP_MSE_RATE_UPDATE, DP, id), \
 	SRI(DP_PIXEL_FORMAT, DP, id), \
 	SRI(DP_SEC_CNTL, DP, id), \
+	SRI(DP_SEC_CNTL1, DP, id), \
 	SRI(DP_SEC_CNTL2, DP, id), \
+	SRI(DP_SEC_CNTL5, DP, id), \
 	SRI(DP_SEC_CNTL6, DP, id), \
 	SRI(DP_STEER_FIFO, DP, id), \
 	SRI(DP_VID_M, DP, id), \
commit fe674c0b6f5382b7c377ca2c418c26dd78b428b4
Author: Eric Bernstein <eric.bernstein at amd.com>
Date:   Tue Jan 25 14:42:12 2022 -0500

    drm/amd/display: Fix disable DSC logic in the DIO code
    
    [Why]
    In the DIO stream encoder, the definition of DP_DSC_MODE has changed (it is
    now only enable/disable). In the OPTC, OTG_SET_V_TOTAL_MIN_MASK_EN is removed
    (same as DCN3.1).
    
    [How]
    In the DIO stream encoder, update enc32_dp_set_dsc_config(). In the OPTC, use
    the DCN3.1 versions of the .set_vrr_m_const and .set_drr function interfaces.
    
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Eric Bernstein <eric.bernstein at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 40e713c4e172..d19fc93dbc75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -388,7 +388,7 @@ static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-	REG_UPDATE(DP_DSC_CNTL,	DP_DSC_MODE, dsc_mode);
+	REG_UPDATE(DP_DSC_CNTL,	DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
 }
 
 /* this function read dsc related register fields to be logged later in dcn10_log_hw_state
commit 3f4dee59253a6882acde98a2a027e55f1330ae86
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Fri Sep 23 14:00:09 2022 -0400

    drm/amd/display: Fix merging dynamic ODM+MPO configs on DCN32
    
    [WHY&HOW?]
    When merging ODM pipes that are using MPO, we must copy the stream_res
    from the new top pipe to the bottom pipe so that the overlaid plane does
    not point to the wrong stream assets.
    
    Reviewed-by: Martin Leung <Martin.Leung at amd.com>
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
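
A minimal standalone sketch of the repointing described above, using simplified
stand-in types rather than the real DC stream_resource/pipe_ctx structures; the
point is the merged bottom MPC pipe inheriting the surviving top pipe's stream
assets:

    #include <stdio.h>
    #include <string.h>

    /* simplified stand-ins for the real stream_resource / pipe_ctx types */
    struct stream_resource { int opp_inst; int tg_inst; };
    struct pipe_ctx {
            struct stream_resource stream_res;
            struct pipe_ctx *top_pipe;
            struct pipe_ctx *bottom_pipe;
    };

    int main(void)
    {
            struct pipe_ctx top = { .stream_res = { .opp_inst = 0, .tg_inst = 0 } };
            struct pipe_ctx bottom = { .stream_res = { .opp_inst = 2, .tg_inst = 1 } };

            /* link the MPC bottom pipe under the surviving ODM pipe... */
            bottom.top_pipe = &top;
            top.bottom_pipe = &bottom;

            /* ...and let it inherit that pipe's stream assets, as the fix does */
            memcpy(&bottom.stream_res, &bottom.top_pipe->stream_res,
                   sizeof(struct stream_resource));

            printf("bottom pipe now uses opp %d, tg %d\n",
                   bottom.stream_res.opp_inst, bottom.stream_res.tg_inst);
            return 0;
    }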

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a56ee04f7df9..f3f98e9a0ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1598,6 +1598,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
 					/*MPC split rules will handle this case*/
 					pipe->bottom_pipe->top_pipe = NULL;
 				} else {
+					/* when merging an ODM pipes, the bottom MPC pipe must now point to
+					 * the previous ODM pipe and its associated stream assets
+					 */
 					if (pipe->prev_odm_pipe->bottom_pipe) {
 						/* 3 plane MPO*/
 						pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
@@ -1607,6 +1610,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
 						pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
 						pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
 					}
+
+					memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
 				}
 			}
 
commit f1b47f0004cfff051441aa93b7115d756d5eebb7
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 20 15:34:55 2022 -0400

    drm/amd/display: Adding missing HDMI ACP SEND register
    
    Add HDMI ACP bit field definition for DCN32.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 250d9a341cf6..e80dd2b92503 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -106,6 +106,7 @@
 	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
 	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
 	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
 	SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
 	SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
 	SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
commit b33cd65df18f1cf60b066a02c09df92b4763bb31
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Wed Sep 21 08:47:44 2022 -0400

    drm/amd/display: Remove OPTC lock check
    
    At some point, we decided to blank HUBP during pixel data blank, and to
    handle that, we added some OPTC lock checks. Later, we realized that
    this change caused multiple regressions, and we removed it. Nevertheless,
    some leftovers remain that might affect some ASIC behavior, and this
    commit drops them to keep the code consistent.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ea7739255119..143a900d4d3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -679,16 +679,6 @@ void optc1_unlock(struct timing_generator *optc)
 			OTG_MASTER_UPDATE_LOCK, 0);
 }
 
-bool optc1_is_locked(struct timing_generator *optc)
-{
-	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	uint32_t locked;
-
-	REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
-
-	return (locked == 1);
-}
-
 void optc1_get_position(struct timing_generator *optc,
 		struct crtc_position *position)
 {
@@ -1583,7 +1573,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
 		.enable_crtc_reset = optc1_enable_crtc_reset,
 		.disable_reset_trigger = optc1_disable_reset_trigger,
 		.lock = optc1_lock,
-		.is_locked = optc1_is_locked,
 		.unlock = optc1_unlock,
 		.enable_optc_clock = optc1_enable_optc_clock,
 		.set_drr = optc1_set_drr,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 6323ca6dc3b3..88ac5f6f4c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -654,7 +654,6 @@ void optc1_set_blank(struct timing_generator *optc,
 		bool enable_blanking);
 
 bool optc1_is_blanked(struct timing_generator *optc);
-bool optc1_is_locked(struct timing_generator *optc);
 
 void optc1_program_blank_color(
 		struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 1782b9c26cf4..02459a64ee21 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -319,7 +319,6 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
 		.enable_crtc_reset = optc1_enable_crtc_reset,
 		.disable_reset_trigger = optc1_disable_reset_trigger,
 		.lock = optc3_lock,
-		.is_locked = optc1_is_locked,
 		.unlock = optc1_unlock,
 		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
 		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 2f7404a97479..d873def1a8f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -260,7 +260,6 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
 		.enable_crtc_reset = optc1_enable_crtc_reset,
 		.disable_reset_trigger = optc1_disable_reset_trigger,
 		.lock = optc3_lock,
-		.is_locked = optc1_is_locked,
 		.unlock = optc1_unlock,
 		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
 		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 4ff1392633a7..1fd7ad853210 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -439,7 +439,6 @@ struct pipe_ctx {
 	union pipe_update_flags update_flags;
 	struct dwbc *dwbc;
 	struct mcif_wb *mcif_wb;
-	bool vtp_locked;
 };
 
 /* Data used for dynamic link encoder assignment.
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 72eef7a5ed83..25a1df45b264 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -209,7 +209,6 @@ struct timing_generator_funcs {
 	void (*set_blank)(struct timing_generator *tg,
 					bool enable_blanking);
 	bool (*is_blanked)(struct timing_generator *tg);
-	bool (*is_locked)(struct timing_generator *tg);
 	void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
 	void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
 	void (*set_colors)(struct timing_generator *tg,
commit 9114b55fabae5522b7124af4f16ea6ce6378aa19
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 20 14:27:04 2022 -0400

    drm/amd/display: Fix SubVP control flow in the MPO context
    
    SubVP has some issues related to how we allocate and enable it. This
    commit fixes this behavior by adding the proper check and configuration
    to the SubVP code path.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6216ceb790b4..40a34b600c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2946,6 +2946,12 @@ static bool update_planes_and_stream_state(struct dc *dc,
 		dc_resource_state_copy_construct(
 				dc->current_state, context);
 
+		/* For each full update, remove all existing phantom pipes first.
+		 * Ensures that we have enough pipes for newly added MPO planes
+		 */
+		if (dc->res_pool->funcs->remove_phantom_pipes)
+			dc->res_pool->funcs->remove_phantom_pipes(dc, context);
+
 		/*remove old surfaces from context */
 		if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
 
@@ -3353,8 +3359,14 @@ static void commit_planes_for_stream(struct dc *dc,
 		/* Since phantom pipe programming is moved to post_unlock_program_front_end,
 		 * move the SubVP lock to after the phantom pipes have been setup
 		 */
-		if (dc->hwss.subvp_pipe_control_lock)
-			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+			if (dc->hwss.subvp_pipe_control_lock)
+				dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+		} else {
+			if (dc->hwss.subvp_pipe_control_lock)
+				dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+		}
+
 		return;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 7de511fd004b..d732b6f031a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1860,24 +1860,6 @@ void dcn20_post_unlock_program_front_end(
 		}
 	}
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-		struct pipe_ctx *mpcc_pipe;
-
-		if (pipe->vtp_locked) {
-			dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp);
-			pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true);
-			pipe->vtp_locked = false;
-
-			for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
-				mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
-
-			for (i = 0; i < dc->res_pool->pipe_count; i++)
-				if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
-					dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
-		}
-	}
-
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 752a4accb116..9585b25f10e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1680,6 +1680,8 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
 		phantom_plane->clip_rect.y = 0;
 		phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
 
+		phantom_plane->is_phantom = true;
+
 		dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
 
 		curr_pipe = curr_pipe->bottom_pipe;
@@ -1749,6 +1751,10 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
 			pipe->stream->mall_stream_config.type = SUBVP_NONE;
 			pipe->stream->mall_stream_config.paired_stream = NULL;
 		}
+
+		if (pipe->plane_state) {
+			pipe->plane_state->is_phantom = false;
+		}
 	}
 	return removed_pipe;
 }
commit 47b7dd9f68c12e7d33a0dfd3d9a5bed755097de0
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 20 13:29:19 2022 -0400

    drm/amd/display: Update DCN321 hook that deals with pipe acquire
    
    DCN provides a hook to check if we can have a new pipe allocation based
    on some DC constraints. If the current configuration supports the new
    pipe request, DC updates its context; otherwise, it will keep the same
    configuration. This behavior is similar across multiple ASICs, and for
    this reason, we reused the DCN20 hook on DCN321. However, DCN32x has some
    peculiarities that require its own function to avoid weird pipe split
    issues. This commit addresses that by using
    dcn32_acquire_idle_pipe_for_head_pipe_in_layer instead of
    dcn20_acquire_idle_pipe_for_layer.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 910b63d874d5..6658849d5b4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -1604,7 +1604,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
 	.validate_bandwidth = dcn32_validate_bandwidth,
 	.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
 	.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
-	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+	.acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
 	.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
commit a2909ff460a8e02168b3658372ebc897f7ab2315
Author: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Date:   Tue Sep 20 13:06:50 2022 -0400

    drm/amd/display: Drop unused code for DCN32/321
    
    Under DCN32/321 we identified some code paths that DC never executes.
    This commit removes that unused code to avoid distractions when
    debugging issues.
    
    Reviewed-by: Aurabindo Pillai <aurabindo.pillai at amd.com>
    Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index fdae6aa89908..076969d928af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -150,12 +150,6 @@ static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
 
 }
 
-void enc32_set_dig_output_mode(struct link_encoder *enc, uint8_t pix_per_container)
-{
-	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container);
-}
- 
 static const struct link_encoder_funcs dcn32_link_enc_funcs = {
 	.read_state = link_enc2_read_state,
 	.validate_output_with_stream =
@@ -186,7 +180,6 @@ static const struct link_encoder_funcs dcn32_link_enc_funcs = {
 	.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
 	.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
 	.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
-	.set_dig_output_mode = enc32_set_dig_output_mode,
 };
 
 void dcn32_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 749a1e8cb811..bbcfce06bec0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,8 +53,4 @@ void dcn32_link_encoder_enable_dp_output(
 	const struct dc_link_settings *link_settings,
 	enum clock_source_id clock_source);
 
-void enc32_set_dig_output_mode(
-		struct link_encoder *enc,
-		uint8_t pix_per_container);
-
 #endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 3195be9d38f5..40e713c4e172 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -411,24 +411,6 @@ static void enc32_read_state(struct stream_encoder *enc, struct enc_state *s)
 	}
 }
 
-static void enc32_stream_encoder_reset_fifo(struct stream_encoder *enc)
-{
-	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-	uint32_t fifo_enabled;
-
-	REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &fifo_enabled);
-
-	if (fifo_enabled == 0) {
-		/* reset DIG resync FIFO */
-		REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
-		/* TODO: fix timeout when wait for DIG_FIFO_RESET_DONE */
-		//REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 1, 100);
-		udelay(1);
-		REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
-		REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 1, 100);
-	}
-}
-
 static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -458,8 +440,6 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
 		enc3_stream_encoder_update_dp_info_packets,
 	.stop_dp_info_packets =
 		enc1_stream_encoder_stop_dp_info_packets,
-	.reset_fifo =
-		enc32_stream_encoder_reset_fifo,
 	.dp_blank =
 		enc1_stream_encoder_dp_blank,
 	.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 830562f4139d..f4b901d393eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -185,8 +185,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
 	.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
 	.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
 	.hubp_update_mall_sel = hubp32_update_mall_sel,
-	.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
-	.hubp_set_flip_int = hubp1_set_flip_int
+	.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
 };
 
 bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 49682a31ecbd..fa9b6603cfd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -91,7 +91,6 @@ static const struct link_encoder_funcs dcn321_link_enc_funcs = {
 	.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
 	.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
 	.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
-	.set_dig_output_mode = enc32_set_dig_output_mode,
 };
 
 void dcn321_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index aed0f689cbbf..910b63d874d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -94,8 +94,6 @@
 #include "dcn20/dcn20_vmid.h"
 
 #define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
 
 enum dcn321_clk_src_array_id {
 	DCN321_CLK_SRC_PLL0,
commit 2d550a159c55ac836a554fd605545b0feb5f7266
Author: Martin Leung <Martin.Leung at amd.com>
Date:   Fri Sep 23 17:55:14 2022 -0400

    drm/amd/display: block odd h_total timings from halving pixel rate
    
    why:
    when dynamic ODM was turned on, logic was also added to halve the pixel clock;
    this still kicked in when we avoided ODM in the case of odd h_total timings
    
    how:
    also block the pixel clock halving mechanism in the case of odd h_total timings
    
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Martin Leung <Martin.Leung at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
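
A minimal standalone sketch of the divisibility test introduced below, with
made-up timing values (the struct here is a simplified stand-in, not the real
dc_crtc_timing); every horizontal parameter must be even before the pixel clock
may be halved:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative subset of the horizontal timing fields used by the check */
    struct h_timing {
            uint16_t h_total;
            uint16_t h_front_porch;
            uint16_t h_addressable;
            uint16_t h_sync_width;
    };

    static bool h_timing_divisible_by_2(const struct h_timing *t)
    {
            uint16_t h_blank_start = t->h_total - t->h_front_porch;
            uint16_t h_blank_end = h_blank_start - t->h_addressable;

            /* h_total, blank start/end and sync width must all be even for
             * the halved pixel rate to land on whole-pixel boundaries
             */
            return (t->h_total % 2 == 0) && (h_blank_start % 2 == 0) &&
                   (h_blank_end % 2 == 0) && (t->h_sync_width % 2 == 0);
    }

    int main(void)
    {
            /* odd h_total: the halving mechanism must stay blocked */
            struct h_timing odd = { .h_total = 2201, .h_front_porch = 88,
                                    .h_addressable = 1920, .h_sync_width = 44 };

            printf("halve pixel rate? %s\n",
                   h_timing_divisible_by_2(&odd) ? "yes" : "no");
            return 0;
    }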

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 0e9dce414641..3195be9d38f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -243,6 +243,39 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
 	return two_pix;
 }
 
+static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
+{
+	/* math borrowed from function of same name in inc/resource
+	 * checks if h_timing is divisible by 2
+	 */
+
+	bool divisible = false;
+	uint16_t h_blank_start = 0;
+	uint16_t h_blank_end = 0;
+
+	if (timing) {
+		h_blank_start = timing->h_total - timing->h_front_porch;
+		h_blank_end = h_blank_start - timing->h_addressable;
+
+		/* HTOTAL, Hblank start/end, and Hsync start/end all must be
+		 * divisible by 2 in order for the horizontal timing params
+		 * to be considered divisible by 2. Hsync start is always 0.
+		 */
+		divisible = (timing->h_total % 2 == 0) &&
+				(h_blank_start % 2 == 0) &&
+				(h_blank_end % 2 == 0) &&
+				(timing->h_sync_width % 2 == 0);
+	}
+	return divisible;
+}
+
+static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
+{
+	/* should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy for DP encoders*/
+	return is_h_timing_divisible_by_2(timing) &&
+		dc->debug.enable_dp_dig_pixel_rate_div_policy;
+}
+
 static void enc32_stream_encoder_dp_unblank(
         struct dc_link *link,
 		struct stream_encoder *enc,
@@ -259,7 +292,7 @@ static void enc32_stream_encoder_dp_unblank(
 
 		/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
 		if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
-			|| dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+			|| is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
 			/*this logic should be the same in get_pixel_clock_parameters() */
 			n_multiply = 1;
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index a750343ca521..8012a48859b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -1161,7 +1161,6 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
 {
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	unsigned int odm_combine_factor = 0;
-	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	bool two_pix_per_container = false;
 
 	// For phantom pipes, use the same programming as the main pipes
@@ -1189,7 +1188,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
 		} else {
 			*k1_div = PIXEL_RATE_DIV_BY_1;
 			*k2_div = PIXEL_RATE_DIV_BY_4;
-			if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+			if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
 				*k2_div = PIXEL_RATE_DIV_BY_2;
 		}
 	}
@@ -1226,7 +1225,6 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
 	struct dc_link *link = stream->link;
 	struct dce_hwseq *hws = link->dc->hwseq;
 	struct pipe_ctx *odm_pipe;
-	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	uint32_t pix_per_cycle = 1;
 
 	params.opp_cnt = 1;
@@ -1245,7 +1243,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
 				pipe_ctx->stream_res.tg->inst);
 	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
 		if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1
-			|| dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+			|| dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) {
 			params.timing.pix_clk_100hz /= 2;
 			pix_per_cycle = 2;
 		}
@@ -1262,6 +1260,9 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
 {
 	struct dc *dc = pipe_ctx->stream->ctx->dc;
 
+	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+		return false;
+
 	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
 		dc->debug.enable_dp_dig_pixel_rate_div_policy)
 		return true;
commit 51619c671316e96d7adaf2b6ea94ce245b81b6dd
Author: Aric Cyr <aric.cyr at amd.com>
Date:   Fri Sep 23 17:09:54 2022 -0400

    drm/amd/display: Fix vupdate and vline position calculation
    
    [why]
    Large deltas for periodic interrupts could result in the interrupt not
    being programmed properly and thus not firing.

    [how]
    Add proper wrap-around support for calculating VUPDATE and VLINE
    positions.
    
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Aric Cyr <aric.cyr at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
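
The wrap-around arithmetic can be read in isolation; this is a standalone
sketch with illustrative v_total and offset values, mirroring the formula the
patch below uses to fold a large or negative line position back into the
0..v_total-1 range before placing the two-line interrupt window:

    #include <stdio.h>

    static void calc_window(int pos, int v_total, int *start_line, int *end_line)
    {
            /* fold the position into [0, v_total) for both signs of pos */
            if (pos >= 0)
                    *start_line = pos - ((pos / v_total) * v_total);
            else
                    *start_line = pos + ((-pos / v_total) + 1) * v_total - 1;

            /* the window end also wraps at the frame boundary */
            *end_line = (*start_line + 2) % v_total;
    }

    int main(void)
    {
            int start, end;

            calc_window(2300, 1125, &start, &end);  /* delta larger than a frame */
            printf("pos 2300 -> start %d, end %d\n", start, end);

            calc_window(-5, 1125, &start, &end);    /* negative offset wraps back */
            printf("pos -5   -> start %d, end %d\n", start, end);
            return 0;
    }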

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 4390f6d7050f..f4b3ec32a331 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3818,28 +3818,14 @@ void dcn10_calc_vupdate_position(
 		uint32_t *start_line,
 		uint32_t *end_line)
 {
-	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
-	int vline_int_offset_from_vupdate =
-			pipe_ctx->stream->periodic_interrupt.lines_offset;
-	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
-	int start_position;
-
-	if (vline_int_offset_from_vupdate > 0)
-		vline_int_offset_from_vupdate--;
-	else if (vline_int_offset_from_vupdate < 0)
-		vline_int_offset_from_vupdate++;
+	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
 
-	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
-
-	if (start_position >= 0)
-		*start_line = start_position;
+	if (vupdate_pos >= 0)
+		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
 	else
-		*start_line = dc_crtc_timing->v_total + start_position - 1;
-
-	*end_line = *start_line + 2;
-
-	if (*end_line >= dc_crtc_timing->v_total)
-		*end_line = 2;
+		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
+	*end_line = (*start_line + 2) % timing->v_total;
 }
 
 static void dcn10_cal_vline_position(
@@ -3848,23 +3834,27 @@ static void dcn10_cal_vline_position(
 		uint32_t *start_line,
 		uint32_t *end_line)
 {
-	switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
-	case START_V_UPDATE:
-		dcn10_calc_vupdate_position(
-				dc,
-				pipe_ctx,
-				start_line,
-				end_line);
-		break;
-	case START_V_SYNC:
+	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
+
+	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
+		if (vline_pos > 0)
+			vline_pos--;
+		else if (vline_pos < 0)
+			vline_pos++;
+
+		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+		if (vline_pos >= 0)
+			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
+		else
+			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
+		*end_line = (*start_line + 2) % timing->v_total;
+	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
 		// vsync is line 0 so start_line is just the requested line offset
-		*start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
-		*end_line = *start_line + 2;
-		break;
-	default:
+		*start_line = vline_pos;
+		*end_line = (*start_line + 2) % timing->v_total;
+	} else
 		ASSERT(0);
-		break;
-	}
 }
 
 void dcn10_setup_periodic_interrupt(
commit baec651f4160f4c3f029edf84bbc18b4fcba9cf5
Author: Wenjing Liu <wenjing.liu at amd.com>
Date:   Thu Sep 22 14:36:32 2022 -0400

    drm/amd/display: write all 4 bytes of FFE_PRESET dpcd value
    
    [why]
    According to the spec, all 4 bytes are expected to be written even if the
    current lane count is less than 4.
    
    Reviewed-by: George Shen <George.Shen at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Wenjing Liu <wenjing.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
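
A minimal sketch of the point above, with a stand-in reporting helper in place
of core_link_write_dpcd (the helper, values and lane count here are
illustrative); the write size is that of the full 4-entry lane-settings array,
not the active lane count:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LANE_COUNT_DP_MAX 4

    /* stand-in for the DPCD write helper: just report what would be written */
    static void write_dpcd(uint32_t addr, const uint8_t *data, size_t size)
    {
            (void)data;
            printf("write %zu byte(s) starting at DPCD 0x%X\n", size, addr);
    }

    int main(void)
    {
            uint8_t dpcd_lane_settings[LANE_COUNT_DP_MAX] = { 0 };
            unsigned int active_lane_count = 2; /* fewer than 4 lanes in use */

            (void)active_lane_count;
            /* write all 4 bytes regardless of the active lane count */
            write_dpcd(0x103 /* DP_TRAINING_LANE0_SET */, dpcd_lane_settings,
                       sizeof(dpcd_lane_settings));
            return 0;
    }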

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index eb32e99fbde1..1254d38f1778 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -944,6 +944,23 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
 	return status;
 }
 
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+		struct dc_link *link,
+		const struct link_training_settings *link_training_setting)
+{
+	enum dc_status status = core_link_write_dpcd(link,
+			DP_TRAINING_LANE0_SET,
+			(uint8_t *)(link_training_setting->dpcd_lane_settings),
+			sizeof(link_training_setting->dpcd_lane_settings));
+
+	DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+			__func__,
+			DP_TRAINING_LANE0_SET,
+			link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+	return status;
+}
+
+
 enum dc_status dpcd_set_lane_settings(
 	struct dc_link *link,
 	const struct link_training_settings *link_training_setting,
@@ -964,16 +981,6 @@ enum dc_status dpcd_set_lane_settings(
 		link_training_setting->link_settings.lane_count);
 
 	if (is_repeater(link_training_setting, offset)) {
-		if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
-				DP_128b_132b_ENCODING)
-			DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
-					" 0x%X TX_FFE_PRESET_VALUE = %x\n",
-					__func__,
-					offset,
-					lane0_set_address,
-					link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
-		else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
-				DP_8b_10b_ENCODING)
 		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
 				" 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
 			__func__,
@@ -985,14 +992,6 @@ enum dc_status dpcd_set_lane_settings(
 			link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
 
 	} else {
-		if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
-				DP_128b_132b_ENCODING)
-			DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
-					__func__,
-					lane0_set_address,
-					link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
-		else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
-				DP_8b_10b_ENCODING)
 		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
 			__func__,
 			lane0_set_address,
@@ -2023,7 +2022,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
 			result = DP_128b_132b_LT_FAILED;
 		} else {
 			dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
-			dpcd_set_lane_settings(link, lt_settings, DPRX);
+			dpcd_128b_132b_set_lane_settings(link, lt_settings);
 		}
 		loop_count++;
 	}
commit b808a7eb30b02e05023b505fe6db590ba799683f
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Tue Sep 20 20:50:49 2022 -0400

    drm/amd/display: Increase compbuf size prior to updating clocks
    
    [WHY?]
    Clocks are updated based on the incoming context's support; however, the new
    compbuf size is not programmed prior to updating clocks, which can result in
    P-State hangs.
    
    [HOW?]
    Increase compbuf size prior to updating clocks.
    
    Reviewed-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Reviewed-by: Martin Leung <Martin.Leung at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1d271fe9e64..7de511fd004b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2018,6 +2018,10 @@ void dcn20_optimize_bandwidth(
 				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
 
+	/* increase compbuf size */
+	if (hubbub->funcs->program_compbuf_size)
+		hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+
 	dc->clk_mgr->funcs->update_clocks(
 			dc->clk_mgr,
 			context,
@@ -2033,9 +2037,6 @@ void dcn20_optimize_bandwidth(
 						pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
 		}
 	}
-	/* increase compbuf size */
-	if (hubbub->funcs->program_compbuf_size)
-		hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
 }
 
 bool dcn20_update_bandwidth(
commit 8cab4ef0ad9521030e1ae4bd294a1e2e6a04659f
Author: Lewis Huang <Lewis.Huang at amd.com>
Date:   Fri Sep 23 10:42:40 2022 +0800

    drm/amd/display: Keep OTG on when Z10 is disabled
    
    [Why]
    OTG is disabled for PSR-SU with Z10 even when Z10 is disabled.

    [How]
    Reverse the condition so the OTG is kept on when Z10 is disabled.
    
    Reviewed-by: Robin Chen <po-tchen at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Lewis Huang <Lewis.Huang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index c4daef1e708c..d7b1ace6328a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3378,8 +3378,8 @@ bool dc_link_setup_psr(struct dc_link *link,
 		case FAMILY_YELLOW_CARP:
 		case AMDGPU_FAMILY_GC_10_3_6:
 		case AMDGPU_FAMILY_GC_11_0_1:
-			if(!dc->debug.disable_z10)
-				psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
+			if (dc->debug.disable_z10)
+				psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
 			break;
 		default:
 			psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
commit 4931ce22eca6ed5f8a3a3820fd13e586011ac219
Author: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
Date:   Fri Sep 16 15:55:55 2022 -0400

    drm/amd/display: add dummy pstate workaround to dcn315
    
    DCN315 has to always allow pstate change or SMU will hang. This
    workaround achieves this by applying a low pstate change latency
    to be used when pstate is calculated to be unsupported. This lower
    latency only accounts for memory retraining; a previous change handles
    locking in the highest available pstate, allowing us to minimize the
    required latency hiding.
    
    Reviewed-by: Charlene Liu <Charlene.Liu at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
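
A condensed standalone sketch of the latency selection described above, with
invented numbers; when DML reports the p-state change is not achievable in
vactive, the low dummy latency (covering only memory retraining) is used in
place of the real WM_A latency:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            /* illustrative values only */
            double wm_a_pstate_latency_us = 64.0;
            double dummy_pstate_latency_us = 10.0;
            bool pstate_supported_in_vactive = false;

            /* DCN315 must always report p-state support, so fall back to the
             * low dummy latency when vactive cannot hide the real one
             */
            double dram_clock_change_latency_us = pstate_supported_in_vactive ?
                            wm_a_pstate_latency_us : dummy_pstate_latency_us;

            printf("dram_clock_change_latency_us = %.1f\n",
                   dram_clock_change_latency_us);
            return 0;
    }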

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 3a3b2ac791c7..020f512e9690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1655,6 +1655,9 @@ noinline bool dcn30_internal_validate_bw(
 	if (!pipes)
 		return false;
 
+	context->bw_ctx.dml.vba.maxMpcComb = 0;
+	context->bw_ctx.dml.vba.VoltageLevel = 0;
+	context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
 	dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
 	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
 
@@ -1873,6 +1876,7 @@ noinline bool dcn30_internal_validate_bw(
 
 	if (repopulate_pipes)
 		pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+	context->bw_ctx.dml.vba.VoltageLevel = vlevel;
 	*vlevel_out = vlevel;
 	*pipe_cnt_out = pipe_cnt;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 0f71bb86dc9a..58746c437554 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -1731,7 +1731,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
 	.panel_cntl_create = dcn31_panel_cntl_create,
 	.validate_bandwidth = dcn31_validate_bandwidth,
 	.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
-	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+	.update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
 	.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 94b0842cd89b..87bfc42bdaaf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -292,6 +292,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
 	.urgent_latency_adjustment_fabric_clock_component_us = 0,
 	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
 	.num_chans = 4,
+	.dummy_pstate_latency_us = 10.0
 };
 
 struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -459,6 +460,23 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
 	}
 }
 
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+	dc_assert_fp_enabled();
+
+	if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
+		/* For 315 pstate change is only supported if possible in vactive */
+		if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[context->bw_ctx.dml.vba.VoltageLevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_vactive)
+			context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+		else
+			context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
+		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
+				dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
+		context->bw_ctx.dml.soc.sr_exit_time_us =
+				dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
+	}
+}
+
 void dcn31_calculate_wm_and_dlg_fp(
 		struct dc *dc, struct dc_state *context,
 		display_e2e_pipe_params_st *pipes,
@@ -486,72 +504,6 @@ void dcn31_calculate_wm_and_dlg_fp(
 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
 
-#if 0 // TODO
-	/* Set B:
-	 * TODO
-	 */
-	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
-		if (vlevel == 0) {
-			pipes[0].clks_cfg.voltage = 1;
-			pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
-		}
-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
-		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
-		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
-	}
-	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
-	pipes[0].clks_cfg.voltage = vlevel;
-	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
-	/* Set C:
-	 * TODO
-	 */
-	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
-		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
-		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
-	}
-	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
-	/* Set D:
-	 * TODO
-	 */
-	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
-		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
-		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
-	}
-	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
 	/* Set A:
 	 * All clocks min required
 	 *
@@ -568,11 +520,9 @@ void dcn31_calculate_wm_and_dlg_fp(
 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-	/* TODO: remove: */
 	context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
 	context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
 	context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
-	/* end remove*/
 
 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 		if (!context->res_ctx.pipe_ctx[i].stream)
@@ -594,6 +544,9 @@ void dcn31_calculate_wm_and_dlg_fp(
 	}
 
 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+	/* For 31x apu pstate change is only supported if possible in vactive */
+	context->bw_ctx.bw.dcn.clk.p_state_change_support =
+			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
 }
 
 void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 4372f17b55d4..fd58b2561ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -35,6 +35,7 @@ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
 				  int pipe_cnt);
 
 void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
 
 void dcn31_calculate_wm_and_dlg_fp(
 		struct dc *dc, struct dc_state *context,
commit e4e481e4d838f30985dd46d43ed195110ed265f5
Author: Zhikai Zhai <zhikai.zhai at amd.com>
Date:   Tue Sep 20 18:51:02 2022 +0800

    drm/amd/display: skip commit minimal transition state
    
    [WHY]
    Dynamic ODM is now disabled when MPO is required, with safe transitions
    used to avoid underflow, but we trigger the minimal transition path too
    often. Committing the dc state with no check performs a pipeline setup that
    may needlessly re-initialize components, such as audio.
    
    [HOW]
    Only do the minimal transition when all pipes are in use; otherwise
    return true to skip it.
    
    Reviewed-by: Dillon Varone <Dillon.Varone at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Zhikai Zhai <zhikai.zhai at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
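
A standalone sketch of the early-out described above, with a simplified
stand-in pipe type; the minimal transition is only forced when every pipe in
the pool already carries a plane:

    #include <stdbool.h>
    #include <stdio.h>

    #define PIPE_COUNT 4

    struct pipe_ctx { void *plane_state; };

    /* force the minimal transition only when all pipes are in use */
    static bool need_minimal_transition(const struct pipe_ctx *pipes, int count)
    {
            int in_use = 0;

            for (int i = 0; i < count; i++)
                    if (pipes[i].plane_state)
                            in_use++;

            return in_use == count;
    }

    int main(void)
    {
            int dummy_plane;
            struct pipe_ctx pipes[PIPE_COUNT] = {
                    { .plane_state = &dummy_plane },
                    { .plane_state = &dummy_plane },
                    { 0 }, { 0 },
            };

            /* two pipes are still free, so the minimal transition is skipped */
            printf("minimal transition needed? %s\n",
                   need_minimal_transition(pipes, PIPE_COUNT) ? "yes" : "no");
            return 0;
    }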

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index a81a61f006c4..6216ceb790b4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3638,10 +3638,30 @@ static bool commit_minimal_transition_state(struct dc *dc,
 	bool temp_subvp_policy;
 	enum dc_status ret = DC_ERROR_UNEXPECTED;
 	unsigned int i, j;
+	unsigned int pipe_in_use = 0;
 
 	if (!transition_context)
 		return false;
 
+	/* check current pipes in use*/
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
+
+		if (pipe->plane_state)
+			pipe_in_use++;
+	}
+
+	/* When the OS add a new surface if we have been used all of pipes with odm combine
+	 * and mpc split feature, it need use commit_minimal_transition_state to transition safely.
+	 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
+	 * call it again. Otherwise return true to skip.
+	 *
+	 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
+	 * enter/exit MPO when DCN still have enough resources.
+	 */
+	if (pipe_in_use != dc->res_pool->pipe_count)
+		return true;
+
 	if (!dc->config.is_vmin_only_asic) {
 		tmp_mpc_policy = dc->debug.pipe_split_policy;
 		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
commit 345d6493476615494bd79a8fe77661918ea7c61a
Author: Leo Chen <sancchen at amd.com>
Date:   Fri Sep 16 14:13:11 2022 -0400

    drm/amd/display: Add log for LTTPR
    
    [Why & How]
    Adding log for LTTPR to facilitate debugging.
    
    Reviewed-by: Charlene Liu <Charlene.Liu at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Leo Chen <sancchen at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 70456580eecc..eb32e99fbde1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -5090,6 +5090,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
 			(dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
 		ASSERT(0);
 		link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+		DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
 	}
 
 	/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
@@ -5098,6 +5099,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
 	if (is_lttpr_present)
 		CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
 
+	DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
 	return is_lttpr_present;
 }
 
@@ -5134,6 +5136,7 @@ void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
 	} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
 		*override = LTTPR_MODE_NON_LTTPR;
 	}
+	DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
 }
 
 enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -5146,22 +5149,34 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
 		return LTTPR_MODE_NON_LTTPR;
 
 	if (vbios_lttpr_aware) {
-		if (vbios_lttpr_force_non_transparent)
+		if (vbios_lttpr_force_non_transparent) {
+			DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
 			return LTTPR_MODE_NON_TRANSPARENT;
-		else
+		} else {
+			DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
 			return LTTPR_MODE_TRANSPARENT;
+		}
 	}
 
 	if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
-			link->dc->caps.extended_aux_timeout_support)
+			link->dc->caps.extended_aux_timeout_support) {
+		DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
 		return LTTPR_MODE_NON_TRANSPARENT;
+	}
 
+	DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
 	return LTTPR_MODE_NON_LTTPR;
 }
 
 enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
 {
-	return dp_is_lttpr_present(link) ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
+	enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+	if (dp_is_lttpr_present(link))
+		mode = LTTPR_MODE_NON_TRANSPARENT;
+
+	DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+	return mode;
 }
 
 static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
@@ -5179,9 +5194,10 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
 	cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
 			link->dc, link->link_enc->transmitter);
 	if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
-			cmd.cable_id.header.ret_status == 1)
+			cmd.cable_id.header.ret_status == 1) {
 		cable_id->raw = cmd.cable_id.data.output_raw;
-
+		DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+	}
 	return cmd.cable_id.header.ret_status == 1;
 }
 
@@ -5228,6 +5244,7 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
 
 	lttpr_present = dp_is_lttpr_present(link) ||
 			(!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
+	DC_LOG_DC("lttpr_present = %d.\n", lttpr_present ? 1 : 0);
 
 	/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
 	 * be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
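
The added logs trace a short decision ladder. Below is a standalone sketch of a ladder with the same shape (not part of the patch); the enum values and input flags are illustrative simplifications of the DC definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative enum mirroring the shape of the DC lttpr_mode values. */
enum lttpr_mode_sketch {
    MODE_NON_LTTPR,
    MODE_TRANSPARENT,
    MODE_NON_TRANSPARENT,
};

static enum lttpr_mode_sketch decide_lttpr_mode(bool lttpr_present,
                                                bool vbios_lttpr_aware,
                                                bool vbios_force_non_transparent,
                                                bool dp14a_non_transparent_allowed)
{
    if (!lttpr_present) {
        printf("chose NON_LTTPR (no repeater present)\n");
        return MODE_NON_LTTPR;
    }

    if (vbios_lttpr_aware) {
        if (vbios_force_non_transparent) {
            printf("chose NON_TRANSPARENT (forced by VBIOS)\n");
            return MODE_NON_TRANSPARENT;
        }
        printf("chose TRANSPARENT (VBIOS aware, not forced)\n");
        return MODE_TRANSPARENT;
    }

    if (dp14a_non_transparent_allowed) {
        printf("chose NON_TRANSPARENT (DP1.4a allowance)\n");
        return MODE_NON_TRANSPARENT;
    }

    printf("chose NON_LTTPR\n");
    return MODE_NON_LTTPR;
}

int main(void)
{
    decide_lttpr_mode(true, true, false, false);
    return 0;
}
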
commit 380202c84454e89d29a9abc670f09b9145617d58
Author: Alvin Lee <Alvin.Lee2 at amd.com>
Date:   Wed Sep 21 12:04:25 2022 -0400

    drm/amd/display: For SubVP pipe split case use min transition into MPO
    
    [Description]
    - For the SubVP pipe split case we need to use a minimal transition
      when opening MPO video, since we are transitioning from 4 pipes
      to 3 pipes and the OPP for a previous MPCC will change
    - Also save and restore mall config when doing fast_validate in case
      there was a shallow copy of the dc->current_state
    
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4ba2c1f95dcb..a81a61f006c4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3564,6 +3564,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
 
 	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
 	bool force_minimal_pipe_splitting = false;
+	uint32_t i;
 
 	*is_plane_addition = false;
 
@@ -3595,6 +3596,36 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
 		}
 	}
 
+	/* For SubVP pipe split case when adding MPO video
+	 * we need to add a minimal transition. In this case
+	 * there will be 2 streams (1 main stream, 1 phantom
+	 * stream).
+	 */
+	if (cur_stream_status &&
+			dc->current_state->stream_count == 2 &&
+			stream->mall_stream_config.type == SUBVP_MAIN) {
+		bool is_pipe_split = false;
+
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+					(dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+					dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+				is_pipe_split = true;
+				break;
+			}
+		}
+
+		/* determine if minimal transition is required due to SubVP*/
+		if (surface_count > 0 && is_pipe_split) {
+			if (cur_stream_status->plane_count > surface_count) {
+				force_minimal_pipe_splitting = true;
+			} else if (cur_stream_status->plane_count < surface_count) {
+				force_minimal_pipe_splitting = true;
+				*is_plane_addition = true;
+			}
+		}
+	}
+
 	return force_minimal_pipe_splitting;
 }
 
@@ -3604,6 +3635,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
 	struct dc_state *transition_context = dc_create_state(dc);
 	enum pipe_split_policy tmp_mpc_policy;
 	bool temp_dynamic_odm_policy;
+	bool temp_subvp_policy;
 	enum dc_status ret = DC_ERROR_UNEXPECTED;
 	unsigned int i, j;
 
@@ -3618,6 +3650,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
 	temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
 	dc->debug.enable_single_display_2to1_odm_policy = false;
 
+	temp_subvp_policy = dc->debug.force_disable_subvp;
+	dc->debug.force_disable_subvp = true;
+
 	dc_resource_state_copy_construct(transition_base_context, transition_context);
 
 	//commit minimal state
@@ -3646,6 +3681,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
 		dc->debug.pipe_split_policy = tmp_mpc_policy;
 
 	dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
+	dc->debug.force_disable_subvp = temp_subvp_policy;
 
 	if (ret != DC_OK) {
 		/*this should never happen*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 05de97ea855f..752a4accb116 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1798,14 +1798,32 @@ bool dcn32_validate_bandwidth(struct dc *dc,
 	int vlevel = 0;
 	int pipe_cnt = 0;
 	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+	struct mall_temp_config mall_temp_config;
 	DC_LOGGER_INIT(dc->ctx->logger);
 
+	/* For fast validation, there are situations where a shallow copy of
+	 * of the dc->current_state is created for the validation. In this case
+	 * we want to save and restore the mall config because we always
+	 * teardown subvp at the beginning of validation (and don't attempt
+	 * to add it back if it's fast validation). If we don't restore the
+	 * subvp config in cases of fast validation + shallow copy of the
+	 * dc->current_state, the dc->current_state will have a partially
+	 * removed subvp state when we did not intend to remove it.
+	 */
+	if (fast_validate) {
+		memset(&mall_temp_config, 0, sizeof(mall_temp_config));
+		dcn32_save_mall_state(dc, context, &mall_temp_config);
+	}
+
 	BW_VAL_TRACE_COUNT();
 
 	DC_FP_START();
 	out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
 	DC_FP_END();
 
+	if (fast_validate)
+		dcn32_restore_mall_state(dc, context, &mall_temp_config);
+
 	if (pipe_cnt == 0)
 		goto validate_out;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index a24f538bdc4c..f76120e67c16 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -45,6 +45,17 @@
 extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
 extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
 
+/* Temp struct used to save and restore MALL config
+ * during validation.
+ *
+ * TODO: Move MALL config into dc_state instead of stream struct
+ * to avoid needing to save/restore.
+ */
+struct mall_temp_config {
+	struct mall_stream_config mall_stream_config[MAX_PIPES];
+	bool is_phantom_plane[MAX_PIPES];
+};
+
 struct dcn32_resource_pool {
 	struct resource_pool base;
 };
@@ -122,6 +133,15 @@ void dcn32_determine_det_override(struct dc *dc,
 
 void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
 	display_e2e_pipe_params_st *pipes);
+
+void dcn32_save_mall_state(struct dc *dc,
+		struct dc_state *context,
+		struct mall_temp_config *temp_config);
+
+void dcn32_restore_mall_state(struct dc *dc,
+		struct dc_state *context,
+		struct mall_temp_config *temp_config);
+
 /* definitions for run time init of reg offsets */
 
 /* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 7f318ced5dee..d51d0c40ae5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -380,3 +380,74 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
 	} else
 		dcn32_determine_det_override(dc, context, pipes);
 }
+
+/**
+ * *******************************************************************************************
+ * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases
+ *
+ * This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
+ * there are situations where a shallow copy of the dc->current_state is created for the
+ * validation. In this case we want to save and restore the mall config because we always
+ * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
+ * fast validation). If we don't restore the subvp config in cases of fast validation +
+ * shallow copy of the dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ *
+ * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
+ *       validation. We don't expect this to happen in fast_validation=1 cases.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [out]: temp_config: struct used to cache the existing MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_save_mall_state(struct dc *dc,
+		struct dc_state *context,
+		struct mall_temp_config *temp_config)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe->stream)
+			temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
+
+		if (pipe->plane_state)
+			temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
+	}
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases
+ *
+ * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in/out]: context: New DC state to be programmed, restore MALL state into here
+ * @param [in]: temp_config: struct that has the cached MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_restore_mall_state(struct dc *dc,
+		struct dc_state *context,
+		struct mall_temp_config *temp_config)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe->stream)
+			pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
+
+		if (pipe->plane_state)
+			pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
+	}
+}
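
The save/restore helpers above follow a plain snapshot pattern: copy the per-pipe MALL config and phantom flags out before validation tears them down, then copy them back in the fast-validate case. A standalone sketch with simplified stand-in types (not the real DC structures):

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define MAX_PIPES_SKETCH 6

/* Simplified stand-ins for the per-stream MALL config and phantom flag. */
struct mall_cfg { int type; };
struct pipe {
    bool has_stream;
    bool has_plane;
    struct mall_cfg mall;
    bool is_phantom_plane;
};

struct mall_backup {
    struct mall_cfg mall[MAX_PIPES_SKETCH];
    bool is_phantom[MAX_PIPES_SKETCH];
};

static void save_mall(const struct pipe pipes[], struct mall_backup *bk)
{
    for (int i = 0; i < MAX_PIPES_SKETCH; i++) {
        if (pipes[i].has_stream)
            bk->mall[i] = pipes[i].mall;
        if (pipes[i].has_plane)
            bk->is_phantom[i] = pipes[i].is_phantom_plane;
    }
}

static void restore_mall(struct pipe pipes[], const struct mall_backup *bk)
{
    for (int i = 0; i < MAX_PIPES_SKETCH; i++) {
        if (pipes[i].has_stream)
            pipes[i].mall = bk->mall[i];
        if (pipes[i].has_plane)
            pipes[i].is_phantom_plane = bk->is_phantom[i];
    }
}

int main(void)
{
    struct pipe pipes[MAX_PIPES_SKETCH] = { { true, true, { 1 }, false } };
    struct mall_backup bk;

    memset(&bk, 0, sizeof(bk));
    save_mall(pipes, &bk);
    pipes[0].mall.type = 0;          /* validation tears down SubVP  */
    restore_mall(pipes, &bk);        /* fast validate: put it back   */
    printf("restored type = %d\n", pipes[0].mall.type);
    return 0;
}
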
commit 1178ac68dc2869a2f4192600b701de3d853272d2
Author: Ian Chen <ian.chen at amd.com>
Date:   Tue Aug 23 17:26:51 2022 +0800

    drm/amd/display: Refactor edp ILR caps codes
    
    We split out ILR config from "global" to "per-panel" config settings.
    
    Reviewed-by: Anthony Koo <Anthony.Koo at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Ian Chen <ian.chen at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 895c6e6bfeb8..c4daef1e708c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1307,7 +1307,10 @@ static bool detect_link_and_local_sink(struct dc_link *link,
 		}
 
 		if (link->connector_signal == SIGNAL_TYPE_EDP) {
-			// Init dc_panel_config
+			/* Init dc_panel_config by HW config */
+			if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+				dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+			/* Pickup base DM settings */
 			dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
 			// Override dc_panel_config if system has specific settings
 			dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index c57df45e83ff..70456580eecc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -5795,7 +5795,7 @@ void detect_edp_sink_caps(struct dc_link *link)
 	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
 	 */
 	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
-			(link->dc->debug.optimize_edp_link_rate ||
+			(link->panel_config.ilr.optimize_edp_link_rate ||
 			link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
 		// Read DPCD 00010h - 0001Fh 16 bytes at one shot
 		core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -6744,7 +6744,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
 	ASSERT(link || crtc_timing); // invalid input
 
 	if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
-			!link->dc->debug.optimize_edp_link_rate)
+			!link->panel_config.ilr.optimize_edp_link_rate)
 		return false;
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2ecf36e6329b..458a4f431ac6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -821,7 +821,6 @@ struct dc_debug_options {
 	/* Enable dmub aux for legacy ddc */
 	bool enable_dmub_aux_for_legacy_ddc;
 	bool disable_fams;
-	bool optimize_edp_link_rate; /* eDP ILR */
 	/* FEC/PSR1 sequence enable delay in 100us */
 	uint8_t fec_enable_delay_in100us;
 	bool enable_driver_sequence_debug;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index bf5f9e2773bc..caf0c7af2d0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -138,6 +138,10 @@ struct dc_panel_config {
 		bool disable_dsc_edp;
 		unsigned int force_dsc_edp_policy;
 	} dsc;
+	/* eDP ILR */
+	struct ilr {
+		bool optimize_edp_link_rate; /* eDP ILR */
+	} ilr;
 };
 /*
  * A link contains one or more sinks and their connected status.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 7cb35bb1c0f1..887081472c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -657,7 +657,6 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.usbc_combo_phy_reset_wa = true,
 		.dmub_command_table = true,
 		.use_max_lb = true,
-		.optimize_edp_link_rate = true
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -677,6 +676,12 @@ static const struct dc_debug_options debug_defaults_diags = {
 		.use_max_lb = true
 };
 
+static const struct dc_panel_config panel_config_defaults = {
+		.ilr = {
+			.optimize_edp_link_rate = true,
+		},
+};
+
 enum dcn20_clk_src_array_id {
 	DCN20_CLK_SRC_PLL0,
 	DCN20_CLK_SRC_PLL1,
@@ -1367,6 +1372,11 @@ static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_d
 	return &panel_cntl->base;
 }
 
+static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+	*panel_config = panel_config_defaults;
+}
+
 #define CTX ctx
 
 #define REG(reg_name) \
@@ -1408,6 +1418,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
 	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
 	.update_bw_bounding_box = dcn21_update_bw_bounding_box,
+	.get_panel_config_defaults = dcn21_get_panel_config_defaults,
 };
 
 static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8745132d6374..fddc21a5a04c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -888,7 +888,6 @@ static const struct dc_debug_options debug_defaults_drv = {
 		}
 	},
 	.disable_z10 = true,
-	.optimize_edp_link_rate = true,
 	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
 	.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
 };
@@ -911,6 +910,12 @@ static const struct dc_debug_options debug_defaults_diags = {
 	.use_max_lb = true
 };
 
+static const struct dc_panel_config panel_config_defaults = {
+	.ilr = {
+		.optimize_edp_link_rate = true,
+	},
+};
+
 static void dcn31_dpp_destroy(struct dpp **dpp)
 {
 	kfree(TO_DCN20_DPP(*dpp));
@@ -1803,6 +1808,11 @@ validate_out:
 	return out;
 }
 
+static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+	*panel_config = panel_config_defaults;
+}
+
 static struct dc_cap_funcs cap_funcs = {
 	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
 };
@@ -1829,6 +1839,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
 	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
 	.update_bw_bounding_box = dcn31_update_bw_bounding_box,
 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+	.get_panel_config_defaults = dcn31_get_panel_config_defaults,
 };
 
 static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 24ec71cbd3e3..70b647b9b4d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -914,7 +914,6 @@ static const struct dc_debug_options debug_defaults_drv = {
 			.afmt = true,
 		}
 	},
-	.optimize_edp_link_rate = true,
 	.seamless_boot_odm_combine = true
 };
 
@@ -936,6 +935,12 @@ static const struct dc_debug_options debug_defaults_diags = {
 	.use_max_lb = true
 };
 
+static const struct dc_panel_config panel_config_defaults = {
+	.ilr = {
+		.optimize_edp_link_rate = true,
+	},
+};
+
 static void dcn31_dpp_destroy(struct dpp **dpp)
 {
 	kfree(TO_DCN20_DPP(*dpp));
@@ -1675,6 +1680,11 @@ static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
 	DC_FP_END();
 }
 
+static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+	*panel_config = panel_config_defaults;
+}
+
 static struct resource_funcs dcn314_res_pool_funcs = {
 	.destroy = dcn314_destroy_resource_pool,
 	.link_enc_create = dcn31_link_encoder_create,
@@ -1697,6 +1707,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
 	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
 	.update_bw_bounding_box = dcn314_update_bw_bounding_box,
 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+	.get_panel_config_defaults = dcn314_get_panel_config_defaults,
 };
 
 static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index eebb42c9ddd6..0f71bb86dc9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
 			.afmt = true,
 		}
 	},
-	.optimize_edp_link_rate = true,
 	.psr_power_use_phy_fsm = 0,
 };
 
@@ -907,6 +906,12 @@ static const struct dc_debug_options debug_defaults_diags = {
 	.use_max_lb = true
 };
 
+static const struct dc_panel_config panel_config_defaults = {
+	.ilr = {
+		.optimize_edp_link_rate = true,
+	},
+};
+
 static void dcn31_dpp_destroy(struct dpp **dpp)
 {
 	kfree(TO_DCN20_DPP(*dpp));
@@ -1708,6 +1713,11 @@ static int dcn315_populate_dml_pipes_from_context(
 	return pipe_cnt;
 }
 
+static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+	*panel_config = panel_config_defaults;
+}
+
 static struct dc_cap_funcs cap_funcs = {
 	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
 };
@@ -1734,6 +1744,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
 	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
 	.update_bw_bounding_box = dcn315_update_bw_bounding_box,
 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+	.get_panel_config_defaults = dcn315_get_panel_config_defaults,
 };
 
 static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index f4b52a35ad84..6b40a11ac83a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
 			.afmt = true,
 		}
 	},
-	.optimize_edp_link_rate = true,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -906,6 +905,12 @@ static const struct dc_debug_options debug_defaults_diags = {
 	.use_max_lb = true
 };
 
+static const struct dc_panel_config panel_config_defaults = {
+	.ilr = {
+		.optimize_edp_link_rate = true,
+	},
+};
+
 static void dcn31_dpp_destroy(struct dpp **dpp)
 {
 	kfree(TO_DCN20_DPP(*dpp));
@@ -1710,6 +1715,11 @@ static int dcn316_populate_dml_pipes_from_context(
 	return pipe_cnt;
 }
 
+static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+	*panel_config = panel_config_defaults;
+}
+
 static struct dc_cap_funcs cap_funcs = {
 	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
 };
@@ -1736,6 +1746,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
 	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
 	.update_bw_bounding_box = dcn316_update_bw_bounding_box,
 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+	.get_panel_config_defaults = dcn316_get_panel_config_defaults,
 };
 
 static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8919a2092ac5..4ff1392633a7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -232,6 +232,7 @@ struct resource_funcs {
             unsigned int index);
 
 	bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
+	void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
 };
 
 struct audio_support{
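
The new get_panel_config_defaults hooks all reduce to copying a const per-ASIC defaults struct into the link's panel config. A standalone sketch of that pattern with a simplified stand-in for dc_panel_config:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for dc_panel_config; only the ILR flag is modeled. */
struct panel_config_sketch {
    struct {
        bool optimize_edp_link_rate;
    } ilr;
};

static const struct panel_config_sketch panel_config_defaults = {
    .ilr = {
        .optimize_edp_link_rate = true,
    },
};

/* Per-ASIC hook: copy the resource pool's defaults into the link's config. */
static void get_panel_config_defaults(struct panel_config_sketch *panel_config)
{
    *panel_config = panel_config_defaults;
}

int main(void)
{
    struct panel_config_sketch cfg = { { false } };

    get_panel_config_defaults(&cfg);
    printf("ILR optimization default: %d\n", cfg.ilr.optimize_edp_link_rate);
    return 0;
}
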
commit 7aeb2e47e43d5acd4638c64b4c0c01ad90feea51
Author: Iswara Nagulendran <Iswara.Nagulendran at amd.com>
Date:   Mon Sep 19 15:53:56 2022 -0400

    drm/amd/display: Allow PSR exit when panel is disconnected
    
    [HOW&WHY]
    Fixed the check to only block PSR entry when the panel
    is disconnected. PSR exit is still permitted, to restore
    the HW to its non-PSR state.
    
    Reviewed-by: Jayendran Ramani <Jayendran.Ramani at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Iswara Nagulendran <Iswara.Nagulendran at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3d19fb92333b..895c6e6bfeb8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3143,7 +3143,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
 	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
 		return false;
 
-	if (allow_active && link->type == dc_connection_none) {
+	if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
 		// Don't enter PSR if panel is not connected
 		return false;
 	}
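
The corrected condition only rejects a request to enter PSR on a disconnected panel; a request to exit PSR always passes through. A standalone sketch of that check (types simplified, not the real DC API):

#include <stdbool.h>
#include <stdio.h>

enum connection_sketch { CONNECTED, DISCONNECTED };

/* Mirror of the fixed condition: reject only a request to ENTER PSR on a
 * disconnected panel; a request to exit PSR is always allowed through. */
static bool psr_request_allowed(const bool *allow_active,
                                enum connection_sketch conn)
{
    if (allow_active && *allow_active && conn == DISCONNECTED)
        return false;   /* don't enter PSR without a panel */

    return true;        /* PSR exit (or connected panel) is fine */
}

int main(void)
{
    bool enter = true, exit_psr = false;

    printf("enter while disconnected: %d\n",
           psr_request_allowed(&enter, DISCONNECTED));    /* 0 */
    printf("exit while disconnected:  %d\n",
           psr_request_allowed(&exit_psr, DISCONNECTED)); /* 1 */
    return 0;
}
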
commit 749b6c2ac9d9a7a4d8f4c2e4dc6fa830fd6c6ac7
Author: Leo (Hanghong) Ma <hanghong.ma at amd.com>
Date:   Tue Sep 20 15:23:42 2022 -0400

    drm/amd/display: AUX tracing cleanup
    
    [Why && How]
    Remove the unnecessary AUX trace and use one trace for AUX failure.
    
    Reviewed-by: Martin Leung <Martin.Leung at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Leo (Hanghong) Ma <hanghong.ma at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 32782ef9ef77..140297c8ff55 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -942,10 +942,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 		case AUX_RET_ERROR_ENGINE_ACQUIRE:
 		case AUX_RET_ERROR_UNKNOWN:
 		default:
-			DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
-						LOG_FLAG_I2cAux_DceAux,
-						"dce_aux_transfer_with_retries: Failure: operation_result=%d",
-						(int)operation_result);
 			goto fail;
 		}
 	}
@@ -953,14 +949,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 fail:
 	DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
 				LOG_FLAG_Error_I2cAux,
-				"dce_aux_transfer_with_retries: FAILURE");
+				"%s: Failure: operation_result=%d",
+				__func__,
+				(int)operation_result);
 	if (!payload_reply)
 		payload->reply = NULL;
 
-	DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
-				WPP_BIT_FLAG_DC_ERROR,
-				"AUX transaction failed. Result: %d",
-				operation_result);
-
 	return false;
 }
commit 283e0a673cdf59fe103707ac0466492b315c81a2
Author: Wenjing Liu <wenjing.liu at amd.com>
Date:   Thu Sep 22 14:22:04 2022 -0400

    drm/amd/display: fix integer overflow during MSA V_Freq calculation
    
    [why]
    Analyzer shows an incorrect V freq in the MSA for some large timings.

    [how]
    Cast the 32-bit integer to uint64_t before the multiplication to
    avoid integer overflow for very large timings.
    
    Reviewed-by: Ariel Bernstein <Eric.Bernstein at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Wenjing Liu <wenjing.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 52fb2bf3d578..d71d89268a07 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -197,7 +197,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
 	uint32_t h_back_porch;
 	uint32_t h_width;
 	uint32_t v_height;
-	unsigned long long v_freq;
+	uint64_t v_freq;
 	uint8_t misc0 = 0;
 	uint8_t misc1 = 0;
 	uint8_t hsp;
@@ -360,7 +360,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
 	v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
 	hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
 	vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
-	v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+	v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100;
 
 	/*   MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
 	 *
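
The overflow is easy to reproduce outside the driver: the pixel clock in 100 Hz units is a 32-bit value, and multiplying it by 100 in 32-bit arithmetic wraps for very large timings. A self-contained example (the clock value is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    /* Illustrative very large timing: 50,000,000 * 100 Hz = 5 GHz. */
    uint32_t pix_clk_100hz = 50000000;

    uint64_t bad  = pix_clk_100hz * 100;            /* 32-bit multiply, wraps   */
    uint64_t good = (uint64_t)pix_clk_100hz * 100;  /* widen first, no overflow */

    printf("truncated: %" PRIu64 "\n", bad);
    printf("correct:   %" PRIu64 "\n", good);
    return 0;
}
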
commit 40169e2f37127b7fe60736045b1f9fc04f76b471
Author: Alvin Lee <Alvin.Lee2 at amd.com>
Date:   Tue Sep 20 19:26:27 2022 -0400

    drm/amd/display: Disable GSL when enabling phantom pipe
    
    [Description]
    When enabling a phantom pipe on a pipe that was previously
    using immediate flip, we have to disable GSL; otherwise the
    update will not take place right away on the phantom
    pipe when we enable it in FW.
    
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 2038cbda33f7..830562f4139d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -79,6 +79,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp)
 	uint32_t reg_val;
 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
 
+	/* For phantom pipe enable, disable GSL */
+	REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, 0);
 	REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, 1);
 	reg_val = REG_READ(DCHUBP_CNTL);
 	if (reg_val) {
commit dfb3367bd082ccf52d3c13ff62257f08407dffcf
Author: Charlene Liu <Charlene.Liu at amd.com>
Date:   Tue Sep 20 09:23:06 2022 -0400

    drm/amd/display: prevent S4 test from failing
    
    [why]
    Limit the VM prefetch check for now, until the feature is fully
    verified.
    
    Reviewed-by: Hansen Dsouza <Hansen.Dsouza at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Charlene Liu <Charlene.Liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5752271f22df..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,15 +67,9 @@ static uint32_t convert_and_clamp(
 void dcn21_dchvm_init(struct hubbub *hubbub)
 {
 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
-	uint32_t riommu_active, prefetch_done;
+	uint32_t riommu_active;
 	int i;
 
-	REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
-
-	if (prefetch_done) {
-		hubbub->riommu_active = true;
-		return;
-	}
 	//Init DCHVM block
 	REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
 
commit 96ab3cb3b0f862308a03046d01d66c7b4154846b
Author: Aric Cyr <aric.cyr at amd.com>
Date:   Mon Sep 19 17:42:22 2022 -0400

    Revert "drm/amd/display: correct hostvm flag"
    
    This reverts commit 796d6a37ff5ffaf9f2dc0f3f4bf9f4a1034c00de.
    
    4K144 resolution isn't available on DCN31.
    
    Reviewed-by: Sherry Wang <Yao.Wang1 at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Aric Cyr <aric.cyr at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8c1a6fb36306..8745132d6374 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -890,7 +890,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_z10 = true,
 	.optimize_edp_link_rate = true,
 	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
-	.dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+	.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
commit d35e8b7ae01430b1e722547b2ef40f42dc30520f
Author: Alvin Lee <Alvin.Lee2 at amd.com>
Date:   Tue Sep 20 10:46:18 2022 -0400

    drm/amd/display: Block SubVP if rotation being used
    
    [Description]
    - SubVP rotation support is not explicitly implemented,
      so block SubVP in rotation cases to avoid unexpected
      behaviors
    
    Reviewed-by: Nevenko Stupar <Nevenko.Stupar at amd.com>
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 55945cca2260..a24f538bdc4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -108,6 +108,8 @@ bool dcn32_subvp_in_use(struct dc *dc,
 
 bool dcn32_mpo_in_use(struct dc_state *context);
 
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+
 struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
 		struct dc_state *state,
 		const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index a2a70a1572b7..7f318ced5dee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -233,6 +233,23 @@ bool dcn32_mpo_in_use(struct dc_state *context)
 	return false;
 }
 
+
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe->stream)
+			continue;
+
+		if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
+			return true;
+	}
+	return false;
+}
+
 /**
  * *******************************************************************************************
  * dcn32_determine_det_override: Determine DET allocation for each pipe
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 0571700f53f9..a56ee04f7df9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1115,7 +1115,8 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 	 * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
 	 */
 	if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
-	    !dcn32_mpo_in_use(context) && (*vlevel == context->bw_ctx.dml.soc.num_states ||
+	    !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
+		(*vlevel == context->bw_ctx.dml.soc.num_states ||
 	    vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
 	    dc->debug.force_subvp_mclk_switch)) {
 
commit ab5220bb5a910246c61512a9e29a4e2406cb1ecd
Author: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
Date:   Thu Sep 8 11:37:58 2022 -0400

    drm/amd/display: fix dcn315 dml detile overestimation
    
    DML does not account for the fact that dcn315 does not have enough
    detile buffer to max out all pipes. This change adds a workaround
    that applies the same logic DC uses when calculating the detile
    buffer size in DML.
    
    Reviewed-by: Charlene Liu <Charlene.Liu at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index b6e99eefe869..94b0842cd89b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -739,7 +739,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 	}
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
-		dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+		dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
 	else
 		dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 8dfe639b6508..b612edb14417 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -43,6 +43,8 @@
 #define BPP_BLENDED_PIPE 0xffffffff
 #define DCN31_MAX_DSC_IMAGE_WIDTH 5184
 #define DCN31_MAX_FMT_420_BUFFER_WIDTH 4096
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_15_MAX_DET_SIZE 384
 
 // For DML-C changes that hasn't been propagated to VBA yet
 //#define __DML_VBA_ALLOW_DELTA__
@@ -3775,6 +3777,17 @@ static noinline void CalculatePrefetchSchedulePerPlane(
 		&v->VReadyOffsetPix[k]);
 }
 
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+{
+	int i, total_pipes = 0;
+	for (i = 0; i < NumberOfActivePlanes; i++)
+		total_pipes += NoOfDPPThisState[i];
+	*DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+	if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
+		*DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+}
+
+
 void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
 {
 	struct vba_vars_st *v = &mode_lib->vba;
@@ -4533,6 +4546,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
 			}
 
+			if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
+				PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
 			CalculateSwathAndDETConfiguration(
 					false,
 					v->NumberOfActivePlanes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index f5400eda07a5..4125d3d111d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -114,6 +114,7 @@ void dml_init_instance(struct display_mode_lib *lib,
 		break;
 	case DML_PROJECT_DCN31:
 	case DML_PROJECT_DCN31_FPGA:
+	case DML_PROJECT_DCN315:
 		lib->funcs = dml31_funcs;
 		break;
 	case DML_PROJECT_DCN314:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index b1878a1440e2..3d643d50c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -40,6 +40,7 @@ enum dml_project {
 	DML_PROJECT_DCN21,
 	DML_PROJECT_DCN30,
 	DML_PROJECT_DCN31,
+	DML_PROJECT_DCN315,
 	DML_PROJECT_DCN31_FPGA,
 	DML_PROJECT_DCN314,
 	DML_PROJECT_DCN32,
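
The workaround's arithmetic can be checked in isolation: distribute the return buffer, minus the minimum compressed buffer, across the active DPPs in 64 KB granularity, then clamp to the per-pipe maximum. A standalone sketch using the constants from the patch (the return-buffer size below is illustrative):

#include <stdio.h>

#define MIN_COMPBUF_SIZE_KB 128   /* DCN3_15_MIN_COMPBUF_SIZE_KB in the patch */
#define MAX_DET_SIZE_KB     384   /* DCN3_15_MAX_DET_SIZE in the patch        */

static unsigned int patched_det_size_kb(unsigned int config_return_buffer_kb,
                                        int total_pipes)
{
    unsigned int det = ((config_return_buffer_kb - MIN_COMPBUF_SIZE_KB)
                        / 64 / total_pipes) * 64;

    if (det > MAX_DET_SIZE_KB)
        det = MAX_DET_SIZE_KB;
    return det;
}

int main(void)
{
    /* Illustrative return-buffer size; the real value comes from
     * ip.config_return_buffer_size_in_kbytes in the SoC parameters. */
    unsigned int ret_buf_kb = 1024;

    for (int pipes = 1; pipes <= 4; pipes++)
        printf("%d pipe(s): DET = %u KB\n",
               pipes, patched_det_size_kb(ret_buf_kb, pipes));
    return 0;
}
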
commit c1969fbaa57d88ddef626bb8ae313d38478d8631
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Mon Sep 19 13:14:02 2022 -0400

    drm/amd/display: Reorder FCLK P-state switch sequence for DCN32
    
    [WHY?]
    In some cases, DCFCLK hardmin requests are not acknowledged by SMU,
    as the requested clock does not have a compatible ratio with the
    current FCLK, and FCLK cannot be changed because an FCLK P-state
    switch is not allowed.
    
    [HOW?]
    Allow FCLK p-state change prior to changing DCFCLK hardmin.
    
    Reviewed-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index f0f3f66629cc..96d5e0d5b3ce 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -333,6 +333,21 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 		if (enter_display_off == safe_to_lower)
 			dcn30_smu_set_num_of_displays(clk_mgr, display_count);
 
+		clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
+
+		total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+		fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+
+		if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+			clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+
+			/* To enable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
+			if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+				/* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
+				dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
+			}
+		}
+
 		if (dc->debug.force_min_dcfclk_mhz > 0)
 			new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
 					new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
@@ -352,7 +367,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 			clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
 
 		clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-		clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
 		clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
 
 		if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
@@ -361,9 +375,8 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 			dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
 		}
 
-		total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+
 		p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
-		fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
 		if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
 			clk_mgr_base->clks.p_state_change_support = p_state_change_support;
 
@@ -373,15 +386,14 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 						clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
 		}
 
-		if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
-				clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21) {
-			clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+		/* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
+		if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
+			update_fclk = true;
+		}
 
-			/* To disable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
-			if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support) {
-				/* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
-				dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
-			}
+		if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+			/* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
+			dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
 		}
 
 		/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -390,21 +402,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 			update_uclk = true;
 		}
 
-		/* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
-		if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
-			update_fclk = true;
-		}
-
 		/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
 		if (clk_mgr_base->clks.p_state_change_support &&
 				(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
 			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
 
-		if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
-			/* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
-			dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
-		}
-
 		if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
 				clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
 			clk_mgr_base->clks.num_ways = new_clocks->num_ways;
commit f7367b5fe0e38af02d6915a355f2ee63b172c9ac
Author: Dillon Varone <Dillon.Varone at amd.com>
Date:   Mon Sep 19 10:29:24 2022 -0400

    drm/amd/display: Program SubVP in dc_commit_state_no_check
    
    [Why?]
    Currently SubVP programming is only done in commit_planes_for_stream, as
    it was expected only this call would add/remove planes from a
    display.
    
    [How?]
    Add SubVP programming to dc_commit_state_no_check.
    
    Reviewed-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Acked-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
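
The ordering added to dc_commit_state_no_check() in the hunks below reduces to: detect whether the outgoing state used SubVP phantom pipes, take the SubVP pipe-control lock around hardware programming, commit the SubVP config, then release the lock. A standalone sketch of that sequence with made-up types (not the real hwss interface):

#include <stdbool.h>
#include <stdio.h>

#define PIPES 4

enum mall_type_sketch { MALL_NONE, MALL_SUBVP_MAIN, MALL_SUBVP_PHANTOM };

struct pipe_sketch {
    bool has_stream;
    enum mall_type_sketch mall_type;
};

static bool old_state_used_subvp(const struct pipe_sketch old_pipes[], int n)
{
    for (int i = 0; i < n; i++)
        if (old_pipes[i].has_stream &&
            old_pipes[i].mall_type == MALL_SUBVP_PHANTOM)
            return true;
    return false;
}

/* Ordering mirrors dc_commit_state_no_check(): lock, program HW, commit
 * the SubVP config for the new context, then unlock. */
static void commit_state_sketch(const struct pipe_sketch old_pipes[], int n)
{
    bool subvp_prev_use = old_state_used_subvp(old_pipes, n);

    printf("subvp_pipe_control_lock(lock=1, prev_use=%d)\n", subvp_prev_use);
    printf("apply_ctx_to_hw()\n");
    printf("commit_subvp_config()\n");
    printf("subvp_pipe_control_lock(lock=0, prev_use=%d)\n", subvp_prev_use);
}

int main(void)
{
    struct pipe_sketch old_pipes[PIPES] = {
        { true, MALL_SUBVP_MAIN }, { true, MALL_SUBVP_PHANTOM },
    };

    commit_state_sketch(old_pipes, PIPES);
    return 0;
}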

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ccaa43d071cf..4ba2c1f95dcb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1734,10 +1734,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	int i, k, l;
 	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
 	struct dc_state *old_state;
+	bool subvp_prev_use = false;
 
 	dc_z10_restore(dc);
 	dc_allow_idle_optimizations(dc, false);
 
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		/* Check old context for SubVP */
+		subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+		if (subvp_prev_use)
+			break;
+	}
+
 	for (i = 0; i < context->stream_count; i++)
 		dc_streams[i] =  context->streams[i];
 
@@ -1777,6 +1787,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
 	}
 
+	if (dc->hwss.subvp_pipe_control_lock)
+		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
 	result = dc->hwss.apply_ctx_to_hw(dc, context);
 
 	if (result != DC_OK) {
@@ -1794,6 +1807,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 		dc->hwss.interdependent_update_lock(dc, context, false);
 		dc->hwss.post_unlock_program_front_end(dc, context);
 	}
+
+	if (dc->hwss.commit_subvp_config)
+		dc->hwss.commit_subvp_config(dc, context);
+	if (dc->hwss.subvp_pipe_control_lock)
+		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
 	for (i = 0; i < context->stream_count; i++) {
 		const struct dc_link *link = context->streams[i]->link;
 
commit 8abbc4f768ddc5c2190ab8966e529cec42b4b2d4
Author: Li Zhong <floridsleeves at gmail.com>
Date:   Sat Sep 24 15:19:39 2022 -0700

    drivers/amd/pm: check the return value of amdgpu_bo_kmap
    
    amdgpu_bo_kmap() returns an error when it fails to map the buffer
    object. Add the error check and propagate the error.
    
    Signed-off-by: Li Zhong <floridsleeves at gmail.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8fd0782a2b20..f5e08b60f66e 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1384,13 +1384,16 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 static void kv_dpm_disable(struct amdgpu_device *adev)
 {
 	struct kv_power_info *pi = kv_get_pi(adev);
+	int err;
 
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
 		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
 		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
 
-	amdgpu_kv_smc_bapm_enable(adev, false);
+	err = amdgpu_kv_smc_bapm_enable(adev, false);
+	if (err)
+		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
 
 	if (adev->asic_type == CHIP_MULLINS)
 		kv_enable_nb_dpm(adev, false);
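
The hunk above applies the usual pattern for a call whose return value was previously ignored: capture the status and, in a void path such as kv_dpm_disable(), at least make the failure visible. A generic standalone sketch (the function names are stand-ins, not real amdgpu APIs):

#include <stdio.h>

/* Stand-in for a firmware call that may fail; not a real amdgpu API. */
static int smc_enable_sketch(int enable)
{
    return enable ? 0 : -5;   /* pretend that disabling fails */
}

static void dpm_disable_sketch(void)
{
    int err = smc_enable_sketch(0);

    if (err)
        fprintf(stderr, "smc_enable_sketch failed: %d\n", err);
    /* void caller: nothing to propagate, but the failure is now visible */
}

int main(void)
{
    dpm_disable_sketch();
    return 0;
}
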
commit d37f379ad04dcc21ebd1d2380c3bc979d54f7c46
Author: Yang Yingliang <yangyingliang at huawei.com>
Date:   Thu Sep 29 17:02:00 2022 +0800

    drm/amd/display: change to enc314_stream_encoder_dp_blank static
    
    enc314_stream_encoder_dp_blank is only used in dcn314_dio_stream_encoder.c
    now, so change it to static.
    
    Fixes: c55bf690fe79 ("drm/amd/display: Add explicit FIFO disable for DP blank")
    Signed-off-by: Yang Yingliang <yangyingliang at huawei.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 0d2ffb692957..7e773bf7b895 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -262,7 +262,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
 	return two_pix;
 }
 
-void enc314_stream_encoder_dp_blank(
+static void enc314_stream_encoder_dp_blank(
 	struct dc_link *link,
 	struct stream_encoder *enc)
 {
commit 8ab1d7a27eff87001ebd0977db600e4187f63f78
Author: Alvin Lee <Alvin.Lee2 at amd.com>
Date:   Wed Aug 17 10:47:59 2022 -0400

    drm/amd/display: Only commit SubVP state after pipe programming
    
    [Description]
    We only want to commit the SubVP config to DMCUB
    after the main and phantom pipe programming has
    completed. Committing the state early can cause
    issues such as P-State being allowed by the HW
    too early, which puts the SubVP state machine
    into a bad state.
    
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Brian Chang <Brian.Chang at amd.com>
    Signed-off-by: Alvin Lee <Alvin.Lee2 at amd.com>
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 258ba5a872b1..ccaa43d071cf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3495,6 +3495,9 @@ static void commit_planes_for_stream(struct dc *dc,
 
 	if (update_type != UPDATE_TYPE_FAST)
 		dc->hwss.post_unlock_program_front_end(dc, context);
+		if (update_type != UPDATE_TYPE_FAST)
+			if (dc->hwss.commit_subvp_config)
+				dc->hwss.commit_subvp_config(dc, context);
 
 	if (update_type != UPDATE_TYPE_FAST)
 		if (dc->hwss.commit_subvp_config)
commit 91954c6c904b515baafaee6a1f35c94409a3bb68
Author: Daniel Gomez <daniel at qtec.com>
Date:   Sun Sep 25 23:53:13 2022 +0200

    drm/amd/display: Fix mutex lock in dcn10
    
    Removal of the DC_FP_* wrappers from dml (9696679bf7ac) provokes a
    mutex lock [2] in the amdgpu driver. Re-arrange the dcn10 code to
    avoid locking the mutex by placing the DC_FP_* wrappers around the
    proper functions.
    
    This fixes the following WARN/stacktrace:
    
    BUG: sleeping function called from invalid context at kernel/locking/mutex.c:283
    in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 227, name: systemd-udevd
    preempt_count: 1, expected: 0
    CPU: 4 PID: 227 Comm: systemd-udevd Not tainted 6.0.0-rc6-qtec-standard #2
    Hardware name: Qtechnology A/S QT5222/QT5221, BIOS v1.0.1 06/07/2021
    Call Trace:
     <TASK>
     dump_stack_lvl+0x33/0x42
     __might_resched.cold.172+0xa5/0xb3
     mutex_lock+0x1a/0x40
     amdgpu_dpm_get_clock_by_type_with_voltage+0x38/0x70 [amdgpu]
     dm_pp_get_clock_levels_by_type_with_voltage+0x64/0xa0 [amdgpu]
     dcn_bw_update_from_pplib+0x70/0x340 [amdgpu]
     dcn10_create_resource_pool+0x8c8/0xd20 [amdgpu]
     ? __kmalloc+0x1c7/0x4a0
     dc_create_resource_pool+0xe7/0x190 [amdgpu]
     dc_create+0x212/0x5d0 [amdgpu]
     amdgpu_dm_init+0x246/0x370 [amdgpu]
     ? schedule_hrtimeout_range_clock+0x93/0x120
     ? phm_wait_for_register_unequal.part.1+0x4a/0x80 [amdgpu]
     dm_hw_init+0xe/0x20 [amdgpu]
     amdgpu_device_init.cold.56+0x1324/0x1653 [amdgpu]
     ? pci_bus_read_config_word+0x43/0x80
     amdgpu_driver_load_kms+0x15/0x120 [amdgpu]
     amdgpu_pci_probe+0x116/0x320 [amdgpu]
     pci_device_probe+0x97/0x110
     really_probe+0xdd/0x340
     __driver_probe_device+0x80/0x170
     driver_probe_device+0x1f/0x90
     __driver_attach+0xdc/0x180
     ? __device_attach_driver+0x100/0x100
     ? __device_attach_driver+0x100/0x100
     bus_for_each_dev+0x74/0xc0
     bus_add_driver+0x19e/0x210
     ? kset_find_obj+0x30/0xa0
     ? 0xffffffffa0a5b000
     driver_register+0x6b/0xc0
     ? 0xffffffffa0a5b000
     do_one_initcall+0x4a/0x1f0
     ? __vunmap+0x28e/0x2f0
     ? __cond_resched+0x15/0x30
     ? kmem_cache_alloc_trace+0x3d/0x440
     do_init_module+0x4a/0x1e0
     load_module+0x1cba/0x1e10
     ? __do_sys_finit_module+0xb7/0x120
     __do_sys_finit_module+0xb7/0x120
     do_syscall_64+0x3c/0x80
     entry_SYSCALL_64_after_hwframe+0x63/0xcd
    RIP: 0033:0x7ff2b5f5422d
    Code: 5d c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48
    89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
    3d 01 f0 ff ff 73 01 c3 48 8b 0d c3 ab 0e 00 f7 d8 64 89 01 48
    RSP: 002b:00007ffc44ab28e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
    RAX: ffffffffffffffda RBX: 0000555c566a9240 RCX: 00007ff2b5f5422d
    RDX: 0000000000000000 RSI: 00007ff2b60bb353 RDI: 0000000000000019
    RBP: 00007ff2b60bb353 R08: 0000000000000000 R09: 0000555c566a9240
    R10: 0000000000000019 R11: 0000000000000246 R12: 0000000000000000
    R13: 0000000000020000 R14: 0000000000000000 R15: 0000000000000000
    </TASK>
    
    Fixes: 9696679bf7ac ("drm/amd/display: remove DC_FP_* wrapper from dml folder")
    Reviewed-by: Hamza Mahfooz <hamza.mahfooz at amd.com>
    Tested-by: Daniel Wheeler <daniel.wheeler at amd.com>
    Signed-off-by: Daniel Gomez <daniel at qtec.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
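
The fix pattern in the diff below keeps only pure FP math inside the DC_FP_START()/DC_FP_END() region and moves the call that can sleep (the pplib/dpm mutex path) outside of it. A schematic standalone sketch with the kernel macros and pplib calls replaced by stubs:

#include <stdio.h>

/* Stubs standing in for DC_FP_START()/DC_FP_END() and the pplib call; the
 * real macros disable preemption, which is why nothing that can sleep may
 * run between them. Names here are illustrative, not the kernel APIs. */
static void fp_start(void) { printf("FP region begin (no sleeping)\n"); }
static void fp_end(void)   { printf("FP region end\n"); }

/* Pure FP math: safe inside the protected region. */
static void compute_soc_clks(int *min_fclk, int *min_dcfclk, int *socclk)
{
    *min_fclk = 400000; *min_dcfclk = 300000; *socclk = 600000; /* made up */
}

/* Takes a mutex in the real driver, so it may sleep: keep it outside. */
static void notify_wm_ranges_may_sleep(int min_fclk, int min_dcfclk, int socclk)
{
    printf("notify pplib: fclk=%d dcfclk=%d socclk=%d\n",
           min_fclk, min_dcfclk, socclk);
}

int main(void)
{
    int min_fclk, min_dcfclk, socclk;

    fp_start();
    compute_soc_clks(&min_fclk, &min_dcfclk, &socclk);
    fp_end();

    notify_wm_ranges_may_sleep(min_fclk, min_dcfclk, socclk);
    return 0;
}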

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 72521749c01d..4390f6d7050f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3005,6 +3005,7 @@ void dcn10_prepare_bandwidth(
 {
 	struct dce_hwseq *hws = dc->hwseq;
 	struct hubbub *hubbub = dc->res_pool->hubbub;
+	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
 
 	if (dc->debug.sanity_checks)
 		hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3027,8 +3028,11 @@ void dcn10_prepare_bandwidth(
 
 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
 		DC_FP_START();
-		dcn_bw_notify_pplib_of_wm_ranges(dc);
+		dcn_get_soc_clks(
+			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
 		DC_FP_END();
+		dcn_bw_notify_pplib_of_wm_ranges(
+			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
 	}
 
 	if (dc->debug.sanity_checks)
@@ -3041,6 +3045,7 @@ void dcn10_optimize_bandwidth(
 {
 	struct dce_hwseq *hws = dc->hwseq;
 	struct hubbub *hubbub = dc->res_pool->hubbub;
+	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
 
 	if (dc->debug.sanity_checks)
 		hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3064,8 +3069,11 @@ void dcn10_optimize_bandwidth(
 
 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
 		DC_FP_START();
-		dcn_bw_notify_pplib_of_wm_ranges(dc);
+		dcn_get_soc_clks(
+			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
 		DC_FP_END();
+		dcn_bw_notify_pplib_of_wm_ranges(
+			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
 	}
 
 	if (dc->debug.sanity_checks)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 831080b9eb87..56d30baf12df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1336,6 +1336,21 @@ static noinline void dcn10_resource_construct_fp(
 	}
 }
 
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+	int i;
+
+	if (clks->num_levels == 0)
+		return false;
+
+	for (i = 0; i < clks->num_levels; i++)
+		/* Ensure that the result is sane */
+		if (clks->data[i].clocks_in_khz == 0)
+			return false;
+
+	return true;
+}
+
 static bool dcn10_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
@@ -1345,6 +1360,9 @@ static bool dcn10_resource_construct(
 	int j;
 	struct dc_context *ctx = dc->ctx;
 	uint32_t pipe_fuses = read_pipe_fuses(ctx);
+	struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
+	bool res;
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1523,15 +1541,53 @@ static bool dcn10_resource_construct(
 			&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
 		dc->debug.az_endpoint_mute_only = false;
 
-	DC_FP_START();
-	if (!dc->debug.disable_pplib_clock_request)
-		dcn_bw_update_from_pplib(dc);
+
+	if (!dc->debug.disable_pplib_clock_request) {
+		/*
+		 * TODO: This is not the proper way to obtain
+		 * fabric_and_dram_bandwidth, should be min(fclk, memclk).
+		 */
+		res = dm_pp_get_clock_levels_by_type_with_voltage(
+				ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
+		DC_FP_START();
+
+		if (res)
+			res = verify_clock_values(&fclks);
+
+		if (res)
+			dcn_bw_update_from_pplib_fclks(dc, &fclks);
+		else
+			BREAK_TO_DEBUGGER();
+
+		DC_FP_END();
+
+		res = dm_pp_get_clock_levels_by_type_with_voltage(
+			ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+		DC_FP_START();
+
+		if (res)
+			res = verify_clock_values(&dcfclks);
+
+		if (res)
+			dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
+		else
+			BREAK_TO_DEBUGGER();
+
+		DC_FP_END();
+	}
+
 	dcn_bw_sync_calcs_and_dml(dc);
 	if (!dc->debug.disable_pplib_wm_range) {
 		dc->res_pool = &pool->base;
-		dcn_bw_notify_pplib_of_wm_ranges(dc);
+		DC_FP_START();
+		dcn_get_soc_clks(
+			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
+		DC_FP_END();
+		dcn_bw_notify_pplib_of_wm_ranges(
+			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
 	}
-	DC_FP_END();
 
 	{
 		struct irq_service_init_data init_data;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index d46adc849d2a..e73f089c84bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1444,81 +1444,67 @@ unsigned int dcn_find_dcfclk_suits_all(
 	return dcf_clk;
 }
 
-static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+void dcn_bw_update_from_pplib_fclks(
+	struct dc *dc,
+	struct dm_pp_clock_levels_with_voltage *fclks)
 {
-	int i;
-
-	if (clks->num_levels == 0)
-		return false;
-
-	for (i = 0; i < clks->num_levels; i++)
-		/* Ensure that the result is sane */
-		if (clks->data[i].clocks_in_khz == 0)
-			return false;
+	unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
 
-	return true;
+	ASSERT(fclks->num_levels);
+
+	vmin0p65_idx = 0;
+	vmid0p72_idx = fclks->num_levels -
+		(fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+	vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+	vmax0p9_idx = fclks->num_levels - 1;
+
+	dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+		32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+	dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+		dc->dcn_soc->number_of_channels *
+		(fclks->data[vmid0p72_idx].clocks_in_khz / 1000.0)
+		* ddr4_dram_factor_single_Channel / 1000.0;
+	dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+		dc->dcn_soc->number_of_channels *
+		(fclks->data[vnom0p8_idx].clocks_in_khz / 1000.0)
+		* ddr4_dram_factor_single_Channel / 1000.0;
+	dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+		dc->dcn_soc->number_of_channels *
+		(fclks->data[vmax0p9_idx].clocks_in_khz / 1000.0)
+		* ddr4_dram_factor_single_Channel / 1000.0;
 }
 
-void dcn_bw_update_from_pplib(struct dc *dc)
+void dcn_bw_update_from_pplib_dcfclks(
+	struct dc *dc,
+	struct dm_pp_clock_levels_with_voltage *dcfclks)
 {
-	struct dc_context *ctx = dc->ctx;
-	struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
-	bool res;
-	unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
-
-	/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
-	res = dm_pp_get_clock_levels_by_type_with_voltage(
-			ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
-
-	if (res)
-		res = verify_clock_values(&fclks);
-
-	if (res) {
-		ASSERT(fclks.num_levels);
-
-		vmin0p65_idx = 0;
-		vmid0p72_idx = fclks.num_levels -
-			(fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
-		vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
-		vmax0p9_idx = fclks.num_levels - 1;
-
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
-			32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
-			dc->dcn_soc->number_of_channels *
-			(fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
-			* ddr4_dram_factor_single_Channel / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
-			dc->dcn_soc->number_of_channels *
-			(fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
-			* ddr4_dram_factor_single_Channel / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
-			dc->dcn_soc->number_of_channels *
-			(fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
-			* ddr4_dram_factor_single_Channel / 1000.0;
-	} else
-		BREAK_TO_DEBUGGER();
-
-	res = dm_pp_get_clock_levels_by_type_with_voltage(
-			ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
-
-	if (res)
-		res = verify_clock_values(&dcfclks);
+	if (dcfclks->num_levels >= 3) {
+		dc->dcn_soc->dcfclkv_min0p65 = dcfclks->data[0].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_mid0p72 = dcfclks->data[dcfclks->num_levels - 3].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_nom0p8 = dcfclks->data[dcfclks->num_levels - 2].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_max0p9 = dcfclks->data[dcfclks->num_levels - 1].clocks_in_khz / 1000.0;
+	}
+}
 
-	if (res && dcfclks.num_levels >= 3) {
-		dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
-		dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
-		dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
-		dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
-	} else
-		BREAK_TO_DEBUGGER();
+void dcn_get_soc_clks(
+	struct dc *dc,
+	int *min_fclk_khz,
+	int *min_dcfclk_khz,
+	int *socclk_khz)
+{
+	*min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+	*min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+	*socclk_khz = dc->dcn_soc->socclk * 1000;
 }
 
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+void dcn_bw_notify_pplib_of_wm_ranges(
+	struct dc *dc,
+	int min_fclk_khz,
+	int min_dcfclk_khz,
+	int socclk_khz)
 {
 	struct pp_smu_funcs_rv *pp = NULL;
 	struct pp_smu_wm_range_sets ranges = {0};
-	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
 	const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
 
 	if (dc->res_pool->pp_smu)
@@ -1526,10 +1512,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 	if (!pp || !pp->set_wm_ranges)
 		return;
 
-	min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
-	min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
-	socclk_khz = dc->dcn_soc->socclk * 1000;
-
 	/* Now notify PPLib/SMU about which Watermarks sets they should select
 	 * depending on DPM state they are in. And update BW MGR GFX Engine and
 	 * Memory clock member variables for Watermarks calculations for each
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..9e4ddc985240 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -628,8 +628,23 @@ unsigned int dcn_find_dcfclk_suits_all(
 	const struct dc *dc,
 	struct dc_clocks *clocks);
 
-void dcn_bw_update_from_pplib(struct dc *dc);
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_get_soc_clks(
+		struct dc *dc,
+		int *min_fclk_khz,
+		int *min_dcfclk_khz,
+		int *socclk_khz);
+
+void dcn_bw_update_from_pplib_fclks(
+		struct dc *dc,
+		struct dm_pp_clock_levels_with_voltage *fclks);
+void dcn_bw_update_from_pplib_dcfclks(
+		struct dc *dc,
+		struct dm_pp_clock_levels_with_voltage *dcfclks);
+void dcn_bw_notify_pplib_of_wm_ranges(
+		struct dc *dc,
+		int min_fclk_khz,
+		int min_dcfclk_khz,
+		int socclk_khz);
 void dcn_bw_sync_calcs_and_dml(struct dc *dc);
 
 enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
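
Condensed, the calling pattern this patch establishes looks roughly like the sketch below (the wrapper function is hypothetical; the callees, types and DC_FP_* macros are the ones touched by the patch, and the verify_clock_values()/BREAK_TO_DEBUGGER() error handling is omitted for brevity). The point is that the PPLib queries and notifications, which can sleep on a mutex, now sit outside the DC_FP_START()/DC_FP_END() region, while only the pure floating-point bandwidth math runs inside it.

static void example_dcn10_update_bw(struct dc *dc, struct dc_context *ctx)
{
	struct dm_pp_clock_levels_with_voltage fclks = {0};
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	/* May sleep (mutex in amdgpu_dpm), so it stays outside DC_FP_*. */
	if (dm_pp_get_clock_levels_by_type_with_voltage(ctx,
			DM_PP_CLOCK_TYPE_FCLK, &fclks)) {
		DC_FP_START();	/* FPU-protected, preemption disabled */
		dcn_bw_update_from_pplib_fclks(dc, &fclks);
		DC_FP_END();
	}

	/* Read the derived soc clocks back as plain integers... */
	DC_FP_START();
	dcn_get_soc_clks(dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
	DC_FP_END();

	/* ...so the sleeping PPLib notification can run outside the FP region. */
	dcn_bw_notify_pplib_of_wm_ranges(dc, min_fclk_khz,
					 min_dcfclk_khz, socclk_khz);
}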
commit cdf6428dd518435a05739abf7659589de30970f4
Author: Ville Syrjälä <ville.syrjala at linux.intel.com>
Date:   Tue Sep 27 21:24:55 2022 +0300

    drm/i915: Reject excessive dotclocks early
    
    Make sure modes with crazy big dotclocks are rejected early,
    so as not to cause problems for subsequent code via integer
    overflows and whatnot.
    
    These would eventually be rejected in intel_crtc_compute_pipe_mode()
    but that is now too late as we do the clock computations a bit
    earlier than that. And we don't want to just reorder the two since
    we still want to check the final computed dotclock against the
    hardware limit to make sure we didn't end up above the limit due
    to rounding/etc.
    
    Fixes: 0ff0e219d9b8 ("drm/i915: Compute clocks earlier")
    Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20220927182455.3422-1-ville.syrjala@linux.intel.com
    Reviewed-by: Jani Nikula <jani.nikula at intel.com>
    (cherry picked from commit df2f59c5857b56a5cc40b6562b032c5d8d50cdfc)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index accf5311b664..f0063e0d4ed3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -8130,6 +8130,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
 }
 
+static int max_dotclock(struct drm_i915_private *i915)
+{
+	int max_dotclock = i915->max_dotclk_freq;
+
+	/* icl+ might use bigjoiner */
+	if (DISPLAY_VER(i915) >= 11)
+		max_dotclock *= 2;
+
+	return max_dotclock;
+}
+
 static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
 		 const struct drm_display_mode *mode)
@@ -8167,6 +8178,13 @@ intel_mode_valid(struct drm_device *dev,
 			   DRM_MODE_FLAG_CLKDIV2))
 		return MODE_BAD;
 
+	/*
+	 * Reject clearly excessive dotclocks early to
+	 * avoid having to worry about huge integers later.
+	 */
+	if (mode->clock > max_dotclock(dev_priv))
+		return MODE_CLOCK_HIGH;
+
 	/* Transcoder timing limits */
 	if (DISPLAY_VER(dev_priv) >= 11) {
 		hdisplay_max = 16384;
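
As a quick illustration of the new early check (the numbers are made up, not real platform limits):

	/* Hypothetical: i915->max_dotclk_freq == 650000 kHz on an icl+ part,
	 * so max_dotclock() returns 1300000 kHz to allow for bigjoiner. */
	if (mode->clock > max_dotclock(dev_priv))	/* e.g. a bogus 2000000 kHz mode */
		return MODE_CLOCK_HIGH;			/* rejected before any further clock math */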
commit b2e3a1af8cce4117de06ff1a4eab0749753ede27
Author: Ville Syrjälä <ville.syrjala at linux.intel.com>
Date:   Mon Oct 3 14:15:43 2022 +0300

    drm/i915: Fix watermark calculations for DG2 CCS+CC modifier
    
    Take the DG2 CCS+CC modifier into account when calculating the
    watermarks. Otherwise we'll calculate the watermarks thinking this
    tile-4 modifier is linear.
    
    The rc_surface part is actually a nop since that is not used
    for any glk+ platform.
    
    Cc: stable at vger.kernel.org
    Fixes: 680025dcc400 ("drm/i915/dg2: Add support for DG2 clear color compression")
    Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila at gmail.com>
    Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221003111544.8007-6-ville.syrjala@linux.intel.com
    (cherry picked from commit 334810f82024815283a6e7febd3d2de1fed6c232)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index a120d49b95ca..18178b01375e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1715,7 +1715,8 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
 		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
 		      modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
-		      modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS;
+		      modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+		      modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
@@ -1723,7 +1724,8 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
 			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
 			 modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
-			 modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS;
+			 modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+			 modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
 
 	wp->width = width;
commit ccfa6d35f9233702c924316cdf40c05b6ce88113
Author: Ville Syrjälä <ville.syrjala at linux.intel.com>
Date:   Mon Oct 3 14:15:42 2022 +0300

    drm/i915: Fix watermark calculations for DG2 CCS modifiers
    
    Take the DG2 CCS modifiers into account when calculating the
    watermarks. Otherwise we'll calculate the watermarks thinking these
    tile-4 modifiers are linear.
    
    The rc_surface part is actually a nop since that is not used
    for any glk+ platform.
    
    Cc: stable at vger.kernel.org
    Fixes: 4c3afa72138c ("drm/i915/dg2: Add support for DG2 render and media compression")
    Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila at gmail.com>
    Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221003111544.8007-5-ville.syrjala@linux.intel.com
    (cherry picked from commit f25d9f81a8e09ace4f04106995550bae1f522143)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 3676662897e7..a120d49b95ca 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1713,13 +1713,17 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
 		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
 		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
-		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+		      modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+		      modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS;
 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
 			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
 			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
-			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+			 modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+			 modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS;
 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
 
 	wp->width = width;
commit 070a2855900de17b1e11a0dc35af9794e80f1a28
Author: Ville Syrjälä <ville.syrjala at linux.intel.com>
Date:   Mon Oct 3 14:15:41 2022 +0300

    drm/i915: Fix watermark calculations for gen12+ CCS+CC modifier
    
    Take the gen12+ CCS+CC modifier into account when calculating the
    watermarks. Otherwise we'll calculate the watermarks thinking this
    Y-tiled modifier is linear.
    
    The rc_surface part is actually a nop since that is not used
    for any glk+ platform.
    
    Cc: stable at vger.kernel.org
    Fixes: d1e2775e9b96 ("drm/i915/tgl: Add Clear Color support for TGL Render Decompression")
    Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila at gmail.com>
    Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221003111544.8007-4-ville.syrjala@linux.intel.com
    (cherry picked from commit a627455bbe50a111475d7a42beb58fa64bd96c83)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 49fc5e2b56fd..3676662897e7 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1712,12 +1712,14 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
 		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
-		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
 			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
-			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
 
 	wp->width = width;
commit 484b2b9281000274ef7c5cb0a9ebc5da6f5c281c
Author: Ville Syrjälä <ville.syrjala at linux.intel.com>
Date:   Mon Oct 3 14:15:40 2022 +0300

    drm/i915: Fix watermark calculations for gen12+ MC CCS modifier
    
    Take the gen12+ MC CCS modifier into account when calculating the
    watermarks. Otherwise we'll calculate the watermarks thinking this
    Y-tiled modifier is linear.
    
    The rc_surface part is actually a nop since that is not used
    for any glk+ platform.
    
    v2: Split RC CCS vs. MC CCS to separate patches
    
    Cc: stable at vger.kernel.org
    Fixes: 2dfbf9d2873a ("drm/i915/tgl: Gen-12 display can decompress surfaces compressed by the media engine")
    Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila at gmail.com>
    Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221003111544.8007-3-ville.syrjala@linux.intel.com
    (cherry picked from commit 91c9651425fe955b1387f3637607dda005f3f710)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 132baada3e11..49fc5e2b56fd 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1711,11 +1711,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 		      modifier == I915_FORMAT_MOD_Yf_TILED ||
 		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
-		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
-			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
 
 	wp->width = width;
commit c56453a00f19ccddee302f5f9fe96b80e0b47fd3
Author: Ville Syrjälä <ville.syrjala at linux.intel.com>
Date:   Mon Oct 3 14:15:39 2022 +0300

    drm/i915: Fix watermark calculations for gen12+ RC CCS modifier
    
    Take the gen12+ RC CCS modifier into account when calculating the
    watermarks. Otherwise we'll calculate the watermarks thinking this
    Y-tiled modifier is linear.
    
    The rc_surface part is actually a nop since that is not used
    for any glk+ platform.
    
    v2: Split RC CCS vs. MC CCS to separate patches
    
    Cc: stable at vger.kernel.org
    Fixes: b3e57bccd68a ("drm/i915/tgl: Gen-12 render decompression")
    Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila at gmail.com>
    Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221003111544.8007-2-ville.syrjala@linux.intel.com
    (cherry picked from commit a89a96a586114f67598c6391c75678b4dba5c2da)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 01b0932757ed..132baada3e11 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1710,10 +1710,12 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 		      modifier == I915_FORMAT_MOD_4_TILED ||
 		      modifier == I915_FORMAT_MOD_Yf_TILED ||
 		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
 
 	wp->width = width;
commit 1de2e7e08e8cd0f281ba9f079a25e72543fe82f6
Author: Jouni Högander <jouni.hogander at intel.com>
Date:   Mon Oct 3 10:20:11 2022 +0300

    drm/i915/psr: Fix PSR_IMR/IIR field handling
    
    Current PSR code is supposed to use TRANSCODER_EDP to force 0 shift for
    bits in PSR_IMR/IIR registers:
    
    /*
     * gen12+ has registers relative to transcoder and one per transcoder
     * using the same bit definition: handle it as TRANSCODER_EDP to force
     * 0 shift in bit definition
     */
    
    When the code was written, the assumption "TRANSCODER_EDP == 0" was made.
    This is not the case, so all fields in PSR_IMR and PSR_IIR are shifted
    incorrectly if DISPLAY_VER >= 12.
    
    Fix this by adding separate register field defines for display version >= 12
    and adding bit getter functions to keep the code readable.
    
    v4:
     - Remove EDP from TGL definitions (José)
     - Use REG_BIT and REG_GENMASK (José)
    v3:
     - Add separate register field defines (José)
     - Add bit getter functions (José)
    v2:
     - Improve commit message (José)
    
    Cc: José Roberto de Souza <jose.souza at intel.com>
    Cc: Mika Kahola <mika.kahola at intel.com>
    
    Fixes: 8241cfbe67f4 ("drm/i915/tgl: Access the right register when handling PSR interruptions")
    Signed-off-by: Jouni Högander <jouni.hogander at intel.com>
    Reviewed-by: José Roberto de Souza <jose.souza at intel.com>
    Signed-off-by: José Roberto de Souza <jose.souza at intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20221003072011.72408-1-jouni.hogander@intel.com
    (cherry picked from commit 8da8e32e0b095613af2c2ce4b322240269164a8e)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9def8d9fade6..d4cce627d7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -116,34 +116,56 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
 	}
 }
 
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+		EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+		EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
 static void psr_irq_control(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum transcoder trans_shift;
 	i915_reg_t imr_reg;
 	u32 mask, val;
 
-	/*
-	 * gen12+ has registers relative to transcoder and one per transcoder
-	 * using the same bit definition: handle it as TRANSCODER_EDP to force
-	 * 0 shift in bit definition
-	 */
-	if (DISPLAY_VER(dev_priv) >= 12) {
-		trans_shift = 0;
+	if (DISPLAY_VER(dev_priv) >= 12)
 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
-	} else {
-		trans_shift = intel_dp->psr.transcoder;
+	else
 		imr_reg = EDP_PSR_IMR;
-	}
 
-	mask = EDP_PSR_ERROR(trans_shift);
+	mask = psr_irq_psr_error_bit_get(intel_dp);
 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
-		mask |= EDP_PSR_POST_EXIT(trans_shift) |
-			EDP_PSR_PRE_ENTRY(trans_shift);
+		mask |= psr_irq_post_exit_bit_get(intel_dp) |
+			psr_irq_pre_entry_bit_get(intel_dp);
 
-	/* Warning: it is masking/setting reserved bits too */
 	val = intel_de_read(dev_priv, imr_reg);
-	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+	val &= ~psr_irq_mask_get(intel_dp);
 	val |= ~mask;
 	intel_de_write(dev_priv, imr_reg, val);
 }
@@ -191,25 +213,21 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	ktime_t time_ns =  ktime_get();
-	enum transcoder trans_shift;
 	i915_reg_t imr_reg;
 
-	if (DISPLAY_VER(dev_priv) >= 12) {
-		trans_shift = 0;
+	if (DISPLAY_VER(dev_priv) >= 12)
 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
-	} else {
-		trans_shift = intel_dp->psr.transcoder;
+	else
 		imr_reg = EDP_PSR_IMR;
-	}
 
-	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
 		intel_dp->psr.last_entry_attempt = time_ns;
 		drm_dbg_kms(&dev_priv->drm,
 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
 			    transcoder_name(cpu_transcoder));
 	}
 
-	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
 		intel_dp->psr.last_exit = time_ns;
 		drm_dbg_kms(&dev_priv->drm,
 			    "[transcoder %s] PSR exit completed\n",
@@ -226,7 +244,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 		}
 	}
 
-	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
 		u32 val;
 
 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
@@ -243,7 +261,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 		 * or unset irq_aux_error.
 		 */
 		val = intel_de_read(dev_priv, imr_reg);
-		val |= EDP_PSR_ERROR(trans_shift);
+		val |= psr_irq_psr_error_bit_get(intel_dp);
 		intel_de_write(dev_priv, imr_reg, val);
 
 		schedule_work(&intel_dp->psr.work);
@@ -1194,14 +1212,12 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
 	 * first time that PSR HW tries to activate so lets keep PSR disabled
 	 * to avoid any rendering problems.
 	 */
-	if (DISPLAY_VER(dev_priv) >= 12) {
+	if (DISPLAY_VER(dev_priv) >= 12)
 		val = intel_de_read(dev_priv,
 				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
-		val &= EDP_PSR_ERROR(0);
-	} else {
+	else
 		val = intel_de_read(dev_priv, EDP_PSR_IIR);
-		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
-	}
+	val &= psr_irq_psr_error_bit_get(intel_dp);
 	if (val) {
 		intel_dp->psr.sink_not_reliable = true;
 		drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1a9bd829fc7e..0b287a59dc2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2157,10 +2157,18 @@
 #define TRANS_PSR_IIR(tran)			_MMIO_TRANS2(tran, _PSR_IIR_A)
 #define   _EDP_PSR_TRANS_SHIFT(trans)		((trans) == TRANSCODER_EDP ? \
 						 0 : ((trans) - TRANSCODER_A + 1) * 8)
-#define   EDP_PSR_TRANS_MASK(trans)		(0x7 << _EDP_PSR_TRANS_SHIFT(trans))
-#define   EDP_PSR_ERROR(trans)			(0x4 << _EDP_PSR_TRANS_SHIFT(trans))
-#define   EDP_PSR_POST_EXIT(trans)		(0x2 << _EDP_PSR_TRANS_SHIFT(trans))
-#define   EDP_PSR_PRE_ENTRY(trans)		(0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+#define   TGL_PSR_MASK			REG_GENMASK(2, 0)
+#define   TGL_PSR_ERROR			REG_BIT(2)
+#define   TGL_PSR_POST_EXIT		REG_BIT(1)
+#define   TGL_PSR_PRE_ENTRY		REG_BIT(0)
+#define   EDP_PSR_MASK(trans)		(TGL_PSR_MASK <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
+#define   EDP_PSR_ERROR(trans)		(TGL_PSR_ERROR <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
+#define   EDP_PSR_POST_EXIT(trans)	(TGL_PSR_POST_EXIT <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
+#define   EDP_PSR_PRE_ENTRY(trans)	(TGL_PSR_PRE_ENTRY <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
 
 #define _SRD_AUX_DATA_A				0x60814
 #define _SRD_AUX_DATA_EDP			0x6f814
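
A worked expansion of the old defines shows where the shift went wrong on gen12+ (this assumes TRANSCODER_A == 0 and TRANSCODER_EDP != 0, which is what the commit message describes; the macros are the ones in the hunk above):

/*
 * The old gen12+ path passed trans_shift = 0 (i.e. TRANSCODER_A, not TRANSCODER_EDP):
 *
 *   EDP_PSR_ERROR(0) = 0x4 << _EDP_PSR_TRANS_SHIFT(0)
 *                    = 0x4 << ((0 - TRANSCODER_A + 1) * 8)   -- 0 != TRANSCODER_EDP
 *                    = 0x4 << 8                              -- bit 10, not bit 2
 *
 * whereas the per-transcoder TRANS_PSR_IMR/IIR registers keep the fields in
 * bits 2:0, which is exactly what TGL_PSR_ERROR/POST_EXIT/PRE_ENTRY
 * (REG_BIT(2)/REG_BIT(1)/REG_BIT(0)) now encode with no shift at all.
 */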
commit af4e20d335d4414814030ba26f1689884c831269
Author: Tejas Upadhyay <tejas.upadhyay at intel.com>
Date:   Fri Sep 30 19:02:23 2022 +0530

    drm/i915/ehl: Update MOCS table for EHL
    
    Add these extra EHL entries back since drm-tip commit 13d29c823738
    ("drm/i915/ehl: unconditionally flush the pages on acquire")
    introduced proper flushing to make them work as expected.
    
    Cc: Chris Wilson <chris.p.wilson at intel.com>
    Cc: Matthew Auld <matthew.auld at intel.com>
    Fixes: 046091758b50 ("Revert "drm/i915/ehl: Update MOCS table for EHL"")
    Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
    Signed-off-by: Tejas Upadhyay <tejas.upadhyay at intel.com>
    Acked-by: Matthew Auld <matthew.auld at intel.com>
    Signed-off-by: Matthew Auld <matthew.auld at intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20220930133223.2757282-1-tejas.upadhyay@intel.com
    (cherry picked from commit 6fa964c045a6bc3321a9186e87bfbcfd1059b0f1)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c6ebe2781076..152244d7f62a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -207,6 +207,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
 	MOCS_ENTRY(15, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
 		   L3_3_WB), \
+	/* Bypass LLC - Uncached (EHL+) */ \
+	MOCS_ENTRY(16, \
+		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+		   L3_1_UC), \
+	/* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+	MOCS_ENTRY(17, \
+		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+		   L3_3_WB), \
 	/* Self-Snoop - L3 + LLC */ \
 	MOCS_ENTRY(18, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
commit 54b978e03a3ce7aa3b40deeb1b4c0c9dd6660aa1
Author: Ville Syrjälä <ville.syrjala at linux.intel.com>
Date:   Mon Sep 26 22:30:21 2022 +0300

    drm/i915: Round to closest in g4x+ HDMI clock readout
    
    On pre-ddi platforms we have slightly different code being
    used for HDMI TMDS clock to dotclock conversion between the
    state computation and state readout. Both of these need to
    round the same way in order to not get a mismatch between
    the computed and read out states. Fix up the rounding
    direction in the readout path to match what is used during
    state computation.
    
    Another option would be to just use intel_crtc_dotclock()
    in the readout path as well, but I don't really want to
    do that as the current code more accurately represents
    how the hardware really works; The HDMI port register
    defines whether we're actually outputting 8bpc or 12bpc
    over HDMI, and the PIPECONF bpc setting just defines what
    goes over FDI between the CPU and PCH. The fact that we
    try to cram all that into a single pipe_bpp during state
    computation is perhaps not entirely great...
    
    Fixes: f2c9df101095 ("drm/i915: Round TMDS clock to nearest")
    Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20220926193021.23287-1-ville.syrjala@linux.intel.com
    Reviewed-by: Jani Nikula <jani.nikula at intel.com>
    (cherry picked from commit 86b972ef1091882d66672399c6f8ebdd12a3b707)
    Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5fbd2ae95869..2b73f5ff0d02 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -120,7 +120,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	pipe_config->hw.adjusted_mode.flags |= flags;
 
 	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
-		dotclock = pipe_config->port_clock * 2 / 3;
+		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
 	else
 		dotclock = pipe_config->port_clock;
 

