Series is:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

________________________________
From: Lazar, Lijo <Lijo.Lazar@amd.com>
Sent: Monday, September 11, 2023 7:45 AM
To: amd-gfx@lists.freedesktop.org <amd-gfx@lists.freedesktop.org>
Cc: Zhang, Hawking <Hawking.Zhang@amd.com>; Deucher, Alexander <Alexander.Deucher@amd.com>
Subject: [PATCH 1/2] drm/amdgpu: Use function for IP version check

Use an inline function for the IP version check. This gives more
flexibility to handle any format changes.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
---
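The whole change reduces to one pattern: add a thin inline accessor in
amdgpu.h and mechanically convert every raw read of adev->ip_versions[][]
to it. As a quick reference, a minimal sketch of the helper and one
representative call-site conversion, taken verbatim from the hunks below:

    /* New accessor: centralizes the table lookup so a future change to
     * how IP versions are stored only needs to touch this one function.
     */
    static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
                                             uint8_t ip, uint8_t inst)
    {
            return adev->ip_versions[ip][inst];
    }

    /* A call site goes from the raw array access ... */
    if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
            return true;

    /* ... to the accessor, with identical semantics. */
    if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
            return true;
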
 drivers/gpu/drm/amd/amdgpu/aldebaran.c        |  6 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  6 ++<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c    |  2 +-<br>
 .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c    |  2 +-<br>
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 29 +++---<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 92 +++++++++--------<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   | 28 ++++--<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c     |  3 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c       |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c       |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       | 12 ++-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c       | 11 ++-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c       | 48 +++++----<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c       | 38 ++++---<br>
 .../gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c    |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c     |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c      |  7 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  3 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c     | 15 +--<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c  |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c       | 11 ++-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c      |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c      |  3 +-<br>
 drivers/gpu/drm/amd/amdgpu/athub_v1_0.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/athub_v2_0.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/athub_v2_1.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/athub_v3_0.c       |  6 +-<br>
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c        | 98 ++++++++++---------<br>
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c        | 27 ++---<br>
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c         | 83 ++++++++--------<br>
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c       |  8 +-<br>
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c      | 13 ++-<br>
 drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c      |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c        | 28 +++---<br>
 drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c        |  8 +-<br>
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c         | 96 +++++++++---------<br>
 drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c         | 18 ++--<br>
 drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c         |  6 +-<br>
 drivers/gpu/drm/amd/amdgpu/imu_v11_0.c        |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c        |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c        |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c        |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/mes_v10_1.c        | 10 +-<br>
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c        |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c       | 12 +--<br>
 drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c     |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c       |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/navi10_ih.c        |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c        |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c        |  6 +-<br>
 drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c        | 10 +-<br>
 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c        |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/nv.c               |  8 +-<br>
 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c        |  7 +-<br>
 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c        |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/psp_v13_0.c        |  6 +-<br>
 drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c      |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c        | 48 +++++----<br>
 drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c      |  7 +-<br>
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c        |  4 +-<br>
 drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c        |  9 +-<br>
 drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c        |  3 +-<br>
 drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c   |  2 +-<br>
 drivers/gpu/drm/amd/amdgpu/soc15.c            | 27 +++--<br>
 drivers/gpu/drm/amd/amdgpu/soc21.c            | 12 +--<br>
 drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c    |  9 +-<br>
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c         |  9 +-<br>
 drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c         | 14 ++-<br>
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c         |  7 +-<br>
 drivers/gpu/drm/amd/amdgpu/vega20_ih.c        | 14 +--<br>
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       | 12 ++-<br>
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c      |  2 +-<br>
 .../drm/amd/amdkfd/kfd_packet_manager_v9.c    |  3 +-<br>
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  2 +-<br>
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c          |  2 +-<br>
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 34 +++----<br>
 .../amd/display/amdgpu_dm/amdgpu_dm_plane.c   | 10 +-<br>
 drivers/gpu/drm/amd/pm/amdgpu_pm.c            | 12 +--<br>
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 24 ++---<br>
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 46 +++++----<br>
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   | 63 +++++++-----<br>
 .../gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c    | 32 +++---<br>
 .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   |  8 +-<br>
 .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c    | 18 ++--<br>
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  |  8 +-<br>
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c  |  2 +-<br>
 .../drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c  | 18 ++--<br>
 94 files changed, 714 insertions(+), 593 deletions(-)<br>
<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c<br>
index 5d2516210a3a..02f4c6f9d4f6 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c<br>
@@ -35,7 +35,7 @@ static bool aldebaran_is_mode2_default(struct amdgpu_reset_control *reset_ctl)<br>
 {<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;<br>
 <br>
-       if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&<br>
+       if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&<br>
              adev->gmc.xgmi.connected_to_cpu))<br>
                 return true;<br>
 <br>
@@ -154,7 +154,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,<br>
         if (reset_device_list == NULL)<br>
                 return -EINVAL;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&<br>
             reset_context->hive == NULL) {<br>
                 /* Wrong context, return error */<br>
                 return -EINVAL;<br>
@@ -335,7 +335,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,<br>
         if (reset_device_list == NULL)<br>
                 return -EINVAL;<br>
 <br>
-       if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==<br>
+       if (amdgpu_ip_version(reset_context->reset_req_dev, MP1_HWIP, 0) ==<br>
                     IP_VERSION(13, 0, 2) &&<br>
             reset_context->hive == NULL) {<br>
                 /* Wrong context, return error */<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h<br>
index 3a86d11d1605..927c049e6132 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h<br>
@@ -1101,6 +1101,12 @@ struct amdgpu_device {<br>
         uint32_t                        aid_mask;<br>
 };<br>
 <br>
+static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,<br>
+                                        uint8_t ip, uint8_t inst)<br>
+{<br>
+       return adev->ip_versions[ip][inst];<br>
+}<br>
+<br>
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)<br>
 {<br>
         return container_of(ddev, struct amdgpu_device, ddev);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c<br>
index 26ff5f8d9795..38b5457baded 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c<br>
@@ -707,7 +707,7 @@ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)<br>
         /* Temporary workaround to fix issues observed in some<br>
          * compute applications when GFXOFF is enabled on GFX11.<br>
          */<br>
-       if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) {<br>
+       if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) {<br>
                 pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");<br>
                 amdgpu_gfx_off_ctrl(adev, idle);<br>
         }<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c<br>
index d67d003bada2..b61a32d6af4b 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c<br>
@@ -658,7 +658,7 @@ static int kgd_gfx_v11_validate_trap_override_request(struct amdgpu_device *adev<br>
                                 KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |<br>
                                 KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 4))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 4))<br>
                 *trap_mask_supported |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |<br>
                                         KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;<br>
 <br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c<br>
index 04b8c7dacd30..51011e8ee90d 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c<br>
@@ -677,7 +677,7 @@ void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,<br>
         int i;<br>
         uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))<br>
                 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,<br>
                                                         stall ? 1 << vmid : 0);<br>
         else<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c<br>
index 3d540b0cf0e1..ca56b5a543b4 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c<br>
@@ -1003,8 +1003,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)<br>
 <br>
         amdgpu_asic_pre_asic_init(adev);<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||<br>
-           adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {<br>
                 amdgpu_psp_wait_for_bootloader(adev);<br>
                 ret = amdgpu_atomfirmware_asic_init(adev, true);<br>
                 return ret;<br>
@@ -2845,7 +2845,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)<br>
 {<br>
         int i, r;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))<br>
                 return;<br>
 <br>
         for (i = 0; i < adev->num_ip_blocks; i++) {<br>
@@ -3098,8 +3098,10 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)<br>
 <br>
                 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */<br>
                 if (adev->in_s0ix &&<br>
-                   (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&<br>
-                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))<br>
+                   (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=<br>
+                    IP_VERSION(5, 0, 0)) &&<br>
+                   (adev->ip_blocks[i].version->type ==<br>
+                    AMD_IP_BLOCK_TYPE_SDMA))<br>
                         continue;<br>
 <br>
                 /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot.<br>
@@ -3590,8 +3592,8 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)<br>
                 adev->gfx.mcbp = true;<br>
         else if (amdgpu_mcbp == 0)<br>
                 adev->gfx.mcbp = false;<br>
-       else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&<br>
-                (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&<br>
+       else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&<br>
+                (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&<br>
                  adev->gfx.num_gfx_rings)<br>
                 adev->gfx.mcbp = true;<br>
 <br>
@@ -3811,7 +3813,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,<br>
          * internal path natively support atomics, set have_atomics_support to true.<br>
          */<br>
         } else if ((adev->flags & AMD_IS_APU) &&<br>
-                  (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {<br>
+                  (amdgpu_ip_version(adev, GC_HWIP, 0) ><br>
+                   IP_VERSION(9, 0, 0))) {<br>
                 adev->have_atomics_support = true;<br>
         } else {<br>
                 adev->have_atomics_support =<br>
@@ -5444,8 +5447,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,<br>
                         adev->asic_reset_res = r;<br>
 <br>
                 /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */<br>
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||<br>
-                   adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                           IP_VERSION(9, 4, 2) ||<br>
+                   amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))<br>
                         amdgpu_ras_resume(adev);<br>
         } else {<br>
                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);<br>
@@ -5470,7 +5474,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,<br>
                         drm_sched_start(&ring->sched, true);<br>
                 }<br>
 <br>
-               if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))<br>
+               if (adev->enable_mes &&<br>
+                   amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))<br>
                         amdgpu_mes_self_test(tmp_adev);<br>
 <br>
                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)<br>
@@ -6147,7 +6152,7 @@ bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)<br>
                 return true;<br>
         default:<br>
                 /* IP discovery */<br>
-               if (!adev->ip_versions[DCE_HWIP][0] ||<br>
+               if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||<br>
                     (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))<br>
                         return false;<br>
                 return true;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c<br>
index 9ab33b0bbbad..430ee7f64a97 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c<br>
@@ -311,8 +311,8 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)<br>
          * So far, apply this quirk only on those Navy Flounder boards which<br>
          * have a bad harvest table of VCN config.<br>
          */<br>
-       if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&<br>
-               (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {<br>
+       if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&<br>
+           (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {<br>
                 switch (adev->pdev->revision) {<br>
                 case 0xC1:<br>
                 case 0xC2:<br>
@@ -1363,8 +1363,8 @@ static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)<br>
          * so read harvest bit per IP data structure to set<br>
          * harvest configuration.<br>
          */<br>
-       if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) &&<br>
-           adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {<br>
                 if ((adev->pdev->device == 0x731E &&<br>
                         (adev->pdev->revision == 0xC6 ||<br>
                          adev->pdev->revision == 0xC7)) ||<br>
@@ -1607,7 +1607,7 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)<br>
 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
         /* what IP to use for this? */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 1, 0):<br>
         case IP_VERSION(9, 2, 1):<br>
@@ -1645,7 +1645,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)<br>
         default:<br>
                 dev_err(adev->dev,<br>
                         "Failed to add common ip block(GC_HWIP:0x%x)\n",<br>
-                       adev->ip_versions[GC_HWIP][0]);<br>
+                       amdgpu_ip_version(adev, GC_HWIP, 0));<br>
                 return -EINVAL;<br>
         }<br>
         return 0;<br>
@@ -1654,7 +1654,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)<br>
 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
         /* use GC or MMHUB IP version */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 1, 0):<br>
         case IP_VERSION(9, 2, 1):<br>
@@ -1690,9 +1690,8 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)<br>
                 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);<br>
                 break;<br>
         default:<br>
-               dev_err(adev->dev,<br>
-                       "Failed to add gmc ip block(GC_HWIP:0x%x)\n",<br>
-                       adev->ip_versions[GC_HWIP][0]);<br>
+               dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",<br>
+                       amdgpu_ip_version(adev, GC_HWIP, 0));<br>
                 return -EINVAL;<br>
         }<br>
         return 0;<br>
@@ -1700,7 +1699,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[OSSSYS_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
         case IP_VERSION(4, 0, 1):<br>
         case IP_VERSION(4, 1, 0):<br>
@@ -1733,7 +1732,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)<br>
         default:<br>
                 dev_err(adev->dev,<br>
                         "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",<br>
-                       adev->ip_versions[OSSSYS_HWIP][0]);<br>
+                       amdgpu_ip_version(adev, OSSSYS_HWIP, 0));<br>
                 return -EINVAL;<br>
         }<br>
         return 0;<br>
@@ -1741,7 +1740,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
                 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);<br>
                 break;<br>
@@ -1787,7 +1786,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)<br>
         default:<br>
                 dev_err(adev->dev,<br>
                         "Failed to add psp ip block(MP0_HWIP:0x%x)\n",<br>
-                       adev->ip_versions[MP0_HWIP][0]);<br>
+                       amdgpu_ip_version(adev, MP0_HWIP, 0));<br>
                 return -EINVAL;<br>
         }<br>
         return 0;<br>
@@ -1795,7 +1794,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
         case IP_VERSION(10, 0, 0):<br>
         case IP_VERSION(10, 0, 1):<br>
@@ -1836,7 +1835,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)<br>
         default:<br>
                 dev_err(adev->dev,<br>
                         "Failed to add smu ip block(MP1_HWIP:0x%x)\n",<br>
-                       adev->ip_versions[MP1_HWIP][0]);<br>
+                       amdgpu_ip_version(adev, MP1_HWIP, 0));<br>
                 return -EINVAL;<br>
         }<br>
         return 0;<br>
@@ -1861,8 +1860,8 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)<br>
                 return 0;<br>
 <br>
 #if defined(CONFIG_DRM_AMD_DC)<br>
-       if (adev->ip_versions[DCE_HWIP][0]) {<br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(1, 0, 0):<br>
                 case IP_VERSION(1, 0, 1):<br>
                 case IP_VERSION(2, 0, 2):<br>
@@ -1888,11 +1887,11 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)<br>
                 default:<br>
                         dev_err(adev->dev,<br>
                                 "Failed to add dm ip block(DCE_HWIP:0x%x)\n",<br>
-                               adev->ip_versions[DCE_HWIP][0]);<br>
+                               amdgpu_ip_version(adev, DCE_HWIP, 0));<br>
                         return -EINVAL;<br>
                 }<br>
-       } else if (adev->ip_versions[DCI_HWIP][0]) {<br>
-               switch (adev->ip_versions[DCI_HWIP][0]) {<br>
+       } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {<br>
+               switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {<br>
                 case IP_VERSION(12, 0, 0):<br>
                 case IP_VERSION(12, 0, 1):<br>
                 case IP_VERSION(12, 1, 0):<br>
@@ -1904,7 +1903,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)<br>
                 default:<br>
                         dev_err(adev->dev,<br>
                                 "Failed to add dm ip block(DCI_HWIP:0x%x)\n",<br>
-                               adev->ip_versions[DCI_HWIP][0]);<br>
+                               amdgpu_ip_version(adev, DCI_HWIP, 0));<br>
                         return -EINVAL;<br>
                 }<br>
         }<br>
@@ -1914,7 +1913,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 1, 0):<br>
         case IP_VERSION(9, 2, 1):<br>
@@ -1952,9 +1951,8 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)<br>
                 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);<br>
                 break;<br>
         default:<br>
-               dev_err(adev->dev,<br>
-                       "Failed to add gfx ip block(GC_HWIP:0x%x)\n",<br>
-                       adev->ip_versions[GC_HWIP][0]);<br>
+               dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",<br>
+                       amdgpu_ip_version(adev, GC_HWIP, 0));<br>
                 return -EINVAL;<br>
         }<br>
         return 0;<br>
@@ -1962,7 +1960,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
         case IP_VERSION(4, 0, 1):<br>
         case IP_VERSION(4, 1, 0):<br>
@@ -2002,7 +2000,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)<br>
         default:<br>
                 dev_err(adev->dev,<br>
                         "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",<br>
-                       adev->ip_versions[SDMA0_HWIP][0]);<br>
+                       amdgpu_ip_version(adev, SDMA0_HWIP, 0));<br>
                 return -EINVAL;<br>
         }<br>
         return 0;<br>
@@ -2010,8 +2008,8 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       if (adev->ip_versions[VCE_HWIP][0]) {<br>
-               switch (adev->ip_versions[UVD_HWIP][0]) {<br>
+       if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {<br>
+               switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {<br>
                 case IP_VERSION(7, 0, 0):<br>
                 case IP_VERSION(7, 2, 0):<br>
                         /* UVD is not supported on vega20 SR-IOV */<br>
@@ -2021,10 +2019,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)<br>
                 default:<br>
                         dev_err(adev->dev,<br>
                                 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",<br>
-                               adev->ip_versions[UVD_HWIP][0]);<br>
+                               amdgpu_ip_version(adev, UVD_HWIP, 0));<br>
                         return -EINVAL;<br>
                 }<br>
-               switch (adev->ip_versions[VCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {<br>
                 case IP_VERSION(4, 0, 0):<br>
                 case IP_VERSION(4, 1, 0):<br>
                         /* VCE is not supported on vega20 SR-IOV */<br>
@@ -2034,11 +2032,11 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)<br>
                 default:<br>
                         dev_err(adev->dev,<br>
                                 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",<br>
-                               adev->ip_versions[VCE_HWIP][0]);<br>
+                               amdgpu_ip_version(adev, VCE_HWIP, 0));<br>
                         return -EINVAL;<br>
                 }<br>
         } else {<br>
-               switch (adev->ip_versions[UVD_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {<br>
                 case IP_VERSION(1, 0, 0):<br>
                 case IP_VERSION(1, 0, 1):<br>
                         amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);<br>
@@ -2089,7 +2087,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)<br>
                 default:<br>
                         dev_err(adev->dev,<br>
                                 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",<br>
-                               adev->ip_versions[UVD_HWIP][0]);<br>
+                               amdgpu_ip_version(adev, UVD_HWIP, 0));<br>
                         return -EINVAL;<br>
                 }<br>
         }<br>
@@ -2098,7 +2096,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 2):<br>
@@ -2136,7 +2134,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 3):<br>
                 aqua_vanjaram_init_soc_config(adev);<br>
                 break;<br>
@@ -2147,7 +2145,7 @@ static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[VPE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {<br>
         case IP_VERSION(6, 1, 0):<br>
                 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);<br>
                 break;<br>
@@ -2160,7 +2158,7 @@ static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)<br>
 <br>
 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[VCN_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 5):<br>
                 if (amdgpu_umsch_mm & 0x1) {<br>
                         amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);<br>
@@ -2354,7 +2352,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)<br>
         amdgpu_discovery_init_soc_config(adev);<br>
         amdgpu_discovery_sysfs_init(adev);<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 2, 1):<br>
         case IP_VERSION(9, 4, 0):<br>
@@ -2408,7 +2406,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)<br>
                 return -EINVAL;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 1, 0):<br>
         case IP_VERSION(9, 2, 2):<br>
         case IP_VERSION(9, 3, 0):<br>
@@ -2427,11 +2425,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))<br>
+       if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))<br>
                 adev->gmc.xgmi.supported = true;<br>
 <br>
         /* set NBIO version */<br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(6, 1, 0):<br>
         case IP_VERSION(6, 2, 0):<br>
                 adev->nbio.funcs = &nbio_v6_1_funcs;<br>
@@ -2493,7 +2491,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[HDP_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
         case IP_VERSION(4, 0, 1):<br>
         case IP_VERSION(4, 1, 0):<br>
@@ -2525,7 +2523,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[DF_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {<br>
         case IP_VERSION(3, 6, 0):<br>
         case IP_VERSION(3, 6, 1):<br>
         case IP_VERSION(3, 6, 2):<br>
@@ -2545,7 +2543,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[SMUIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(10, 0, 0):<br>
@@ -2588,7 +2586,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[LSDMA_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {<br>
         case IP_VERSION(6, 0, 0):<br>
         case IP_VERSION(6, 0, 1):<br>
         case IP_VERSION(6, 0, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c<br>
index 363e6a2cad8c..0cacd0b9f8be 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c<br>
@@ -766,11 +766,13 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)<br>
                         return -EINVAL;<br>
                 }<br>
 <br>
-               if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))<br>
                         version = AMD_FMT_MOD_TILE_VER_GFX11;<br>
-               else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))<br>
+               else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=<br>
+                        IP_VERSION(10, 3, 0))<br>
                         version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;<br>
-               else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))<br>
+               else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=<br>
+                        IP_VERSION(10, 0, 0))<br>
                         version = AMD_FMT_MOD_TILE_VER_GFX10;<br>
                 else<br>
                         version = AMD_FMT_MOD_TILE_VER_GFX9;<br>
@@ -779,13 +781,15 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)<br>
                 case 0: /* Z microtiling */<br>
                         return -EINVAL;<br>
                 case 1: /* S microtiling */<br>
-                       if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {<br>
+                       if (amdgpu_ip_version(adev, GC_HWIP, 0) <<br>
+                           IP_VERSION(11, 0, 0)) {<br>
                                 if (!has_xor)<br>
                                         version = AMD_FMT_MOD_TILE_VER_GFX9;<br>
                         }<br>
                         break;<br>
                 case 2:<br>
-                       if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {<br>
+                       if (amdgpu_ip_version(adev, GC_HWIP, 0) <<br>
+                           IP_VERSION(11, 0, 0)) {<br>
                                 if (!has_xor && afb->base.format->cpp[0] != 4)<br>
                                         version = AMD_FMT_MOD_TILE_VER_GFX9;<br>
                         }<br>
@@ -838,10 +842,12 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)<br>
                         u64 render_dcc_offset;<br>
 <br>
                         /* Enable constant encode on RAVEN2 and later. */<br>
-                       bool dcc_constant_encode = (adev->asic_type > CHIP_RAVEN ||<br>
-                                                  (adev->asic_type == CHIP_RAVEN &&<br>
-                                                   adev->external_rev_id >= 0x81)) &&<br>
-                                                   adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0);<br>
+                       bool dcc_constant_encode =<br>
+                               (adev->asic_type > CHIP_RAVEN ||<br>
+                                (adev->asic_type == CHIP_RAVEN &&<br>
+                                 adev->external_rev_id >= 0x81)) &&<br>
+                               amdgpu_ip_version(adev, GC_HWIP, 0) <<br>
+                                       IP_VERSION(11, 0, 0);<br>
 <br>
                         int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :<br>
                                               dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :<br>
@@ -878,7 +884,9 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)<br>
                                 if (adev->family >= AMDGPU_FAMILY_NV) {<br>
                                         int extra_pipe = 0;<br>
 <br>
-                                       if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&<br>
+                                       if ((amdgpu_ip_version(adev, GC_HWIP,<br>
+                                                              0) >=<br>
+                                            IP_VERSION(10, 3, 0)) &&<br>
                                             pipes == packers && pipes > 1)<br>
                                                 extra_pipe = 1;<br>
 <br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c<br>
index ef713806dd60..6686b911fb4a 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c<br>
@@ -2251,7 +2251,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)<br>
                 pm_runtime_forbid(dev->dev);<br>
         }<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&<br>
             !amdgpu_sriov_vf(adev)) {<br>
                 bool need_to_reset_gpu = false;<br>
 <br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c<br>
index e163cb0bacd8..709a2c1b9d63 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c<br>
@@ -570,7 +570,8 @@ static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)<br>
         switch (ring->funcs->type) {<br>
         case AMDGPU_RING_TYPE_SDMA:<br>
         /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=<br>
+                   IP_VERSION(5, 0, 0))<br>
                         is_gfx_power_domain = true;<br>
                 break;<br>
         case AMDGPU_RING_TYPE_GFX:<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c<br>
index 2382921710ec..9a158018ae16 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c<br>
@@ -158,7 +158,7 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)<br>
                 return amdgpu_compute_multipipe == 1;<br>
         }<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))<br>
                 return true;<br>
 <br>
         /* FIXME: spreading the queues across pipes causes perf regressions<br>
@@ -385,7 +385,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,<br>
         u32 domain = AMDGPU_GEM_DOMAIN_GTT;<br>
 <br>
         /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */<br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))<br>
                 domain |= AMDGPU_GEM_DOMAIN_VRAM;<br>
 <br>
         /* create MQD for KIQ */<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c<br>
index 5f7641d9f346..de7b379a9cc8 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c<br>
@@ -588,7 +588,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)<br>
  */<br>
 void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         /* RAVEN */<br>
         case IP_VERSION(9, 2, 2):<br>
         case IP_VERSION(9, 1, 0):<br>
@@ -652,7 +652,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)<br>
 void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)<br>
 {<br>
         struct amdgpu_gmc *gmc = &adev->gmc;<br>
-       uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];<br>
+       uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);<br>
         bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||<br>
                                 gc_ver == IP_VERSION(9, 3, 0) ||<br>
                                 gc_ver == IP_VERSION(9, 4, 0) ||<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c<br>
index d462b36adf4b..4e32c428c613 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c<br>
@@ -501,18 +501,22 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,<br>
         if (adev->asic_type >= CHIP_VEGA10) {<br>
                 switch (type) {<br>
                 case AMD_IP_BLOCK_TYPE_GFX:<br>
-                       result->ip_discovery_version = adev->ip_versions[GC_HWIP][0];<br>
+                       result->ip_discovery_version =<br>
+                               amdgpu_ip_version(adev, GC_HWIP, 0);<br>
                         break;<br>
                 case AMD_IP_BLOCK_TYPE_SDMA:<br>
-                       result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0];<br>
+                       result->ip_discovery_version =<br>
+                               amdgpu_ip_version(adev, SDMA0_HWIP, 0);<br>
                         break;<br>
                 case AMD_IP_BLOCK_TYPE_UVD:<br>
                 case AMD_IP_BLOCK_TYPE_VCN:<br>
                 case AMD_IP_BLOCK_TYPE_JPEG:<br>
-                       result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0];<br>
+                       result->ip_discovery_version =<br>
+                               amdgpu_ip_version(adev, UVD_HWIP, 0);<br>
                         break;<br>
                 case AMD_IP_BLOCK_TYPE_VCE:<br>
-                       result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0];<br>
+                       result->ip_discovery_version =<br>
+                               amdgpu_ip_version(adev, VCE_HWIP, 0);<br>
                         break;<br>
                 default:<br>
                         result->ip_discovery_version = 0;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c<br>
index 10ce5557bb11..70fe3b39c004 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c<br>
@@ -131,7 +131,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev)<br>
                 adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;<br>
 <br>
         for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <<br>
+                   IP_VERSION(6, 0, 0))<br>
                         adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;<br>
                 /* zero sdma_hqd_mask for non-existent engine */<br>
                 else if (adev->sdma.num_instances == 1)<br>
@@ -1339,8 +1340,10 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)<br>
 <br>
         for (i = 0; i < ARRAY_SIZE(queue_types); i++) {<br>
                 /* On GFX v10.3, fw hasn't supported to map sdma queue. */<br>
-               if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&<br>
-                   adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) >=<br>
+                           IP_VERSION(10, 3, 0) &&<br>
+                   amdgpu_ip_version(adev, GC_HWIP, 0) <<br>
+                           IP_VERSION(11, 0, 0) &&<br>
                     queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)<br>
                         continue;<br>
 <br>
@@ -1401,7 +1404,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)<br>
 <br>
         amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,<br>
                                        sizeof(ucode_prefix));<br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {<br>
                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",<br>
                          ucode_prefix,<br>
                          pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c<br>
index ed0955ccd3d7..72ee66db182c 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c<br>
@@ -100,7 +100,7 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp<br>
                 return;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 4):<br>
         case IP_VERSION(11, 0, 5):<br>
@@ -128,7 +128,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp)<br>
 <br>
         amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));<br>
 <br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
         case IP_VERSION(11, 0, 7):<br>
         case IP_VERSION(11, 0, 9):<br>
@@ -162,7 +162,7 @@ static int psp_early_init(void *handle)<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
         struct psp_context *psp = &adev->psp;<br>
 <br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
                 psp_v3_1_set_psp_funcs(psp);<br>
                 psp->autoload_supported = false;<br>
@@ -334,7 +334,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,<br>
         bool ret = false;<br>
         int i;<br>
 <br>
-       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6))<br>
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))<br>
                 return false;<br>
 <br>
         db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;<br>
@@ -413,7 +413,7 @@ static int psp_sw_init(void *handle)<br>
 <br>
         adev->psp.xgmi_context.supports_extended_data =<br>
                 !adev->gmc.xgmi.connected_to_cpu &&<br>
-                       adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);<br>
+               amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);<br>
 <br>
         memset(&scpm_entry, 0, sizeof(scpm_entry));<br>
         if ((psp_get_runtime_db_entry(adev,<br>
@@ -773,7 +773,7 @@ static int psp_load_toc(struct psp_context *psp,<br>
 <br>
 static bool psp_boottime_tmr(struct psp_context *psp)<br>
 {<br>
-       switch (psp->adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 6):<br>
                 return true;<br>
         default:<br>
@@ -828,7 +828,7 @@ static int psp_tmr_init(struct psp_context *psp)<br>
 <br>
 static bool psp_skip_tmr(struct psp_context *psp)<br>
 {<br>
-       switch (psp->adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 9):<br>
         case IP_VERSION(11, 0, 7):<br>
         case IP_VERSION(13, 0, 2):<br>
@@ -1215,8 +1215,8 @@ int psp_xgmi_terminate(struct psp_context *psp)<br>
         struct amdgpu_device *adev = psp->adev;<br>
 <br>
         /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */<br>
-       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||<br>
-           (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&<br>
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||<br>
+           (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&<br>
              adev->gmc.xgmi.connected_to_cpu))<br>
                 return 0;<br>
 <br>
@@ -1313,9 +1313,11 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)<br>
 <br>
 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)<br>
 {<br>
-       return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&<br>
+       return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==<br>
+                       IP_VERSION(13, 0, 2) &&<br>
                 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||<br>
-               psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6);<br>
+              amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=<br>
+                      IP_VERSION(13, 0, 6);<br>
 }<br>
 <br>
 /*<br>
@@ -1424,8 +1426,10 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,<br>
         if (psp_xgmi_peer_link_info_supported(psp)) {<br>
                 struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output;<br>
                 bool requires_reflection =<br>
-                       (psp->xgmi_context.supports_extended_data && get_extended_data) ||<br>
-                               psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6);<br>
+                       (psp->xgmi_context.supports_extended_data &&<br>
+                        get_extended_data) ||<br>
+                       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==<br>
+                               IP_VERSION(13, 0, 6);<br>
 <br>
                 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;<br>
 <br>
@@ -2517,10 +2521,9 @@ static int psp_load_smu_fw(struct psp_context *psp)<br>
         if (!ucode->fw || amdgpu_sriov_vf(psp->adev))<br>
                 return 0;<br>
 <br>
-       if ((amdgpu_in_reset(adev) &&<br>
-            ras && adev->ras_enabled &&<br>
-            (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||<br>
-             adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {<br>
+       if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&<br>
+            (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||<br>
+             amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {<br>
                 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);<br>
                 if (ret)<br>
                         DRM_WARN("Failed to set MP1 state prepare for reload\n");<br>
@@ -2603,9 +2606,12 @@ static int psp_load_non_psp_fw(struct psp_context *psp)<br>
                         continue;<br>
 <br>
                 if (psp->autoload_supported &&<br>
-                   (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) ||<br>
-                    adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) ||<br>
-                    adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) &&<br>
+                   (amdgpu_ip_version(adev, MP0_HWIP, 0) ==<br>
+                            IP_VERSION(11, 0, 7) ||<br>
+                    amdgpu_ip_version(adev, MP0_HWIP, 0) ==<br>
+                            IP_VERSION(11, 0, 11) ||<br>
+                    amdgpu_ip_version(adev, MP0_HWIP, 0) ==<br>
+                            IP_VERSION(11, 0, 12)) &&<br>
                     (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||<br>
                      ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||<br>
                      ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))<br>
@@ -3146,7 +3152,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)<br>
                 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);<br>
 <br>
         if (adev->gmc.xgmi.connected_to_cpu ||<br>
-           (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) {<br>
+           (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {<br>
                 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);<br>
                 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);<br>
 <br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c<br>
index 632478874f7d..eeb695f9ff1c 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c<br>
@@ -201,8 +201,8 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,<br>
                 return -EINVAL;<br>
 <br>
         /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */<br>
-       if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&<br>
-           obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {<br>
+       if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&<br>
+           amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {<br>
                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))<br>
                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");<br>
         }<br>
@@ -611,8 +611,8 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,<br>
         if (amdgpu_ras_query_error_status(obj->adev, &info))<br>
                 return -EINVAL;<br>
 <br>
-       if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&<br>
-           obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {<br>
+       if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&<br>
+           amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {<br>
                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))<br>
                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");<br>
         }<br>
@@ -1208,8 +1208,8 @@ static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,<br>
 <br>
         /* some hardware/IP supports read to clear<br>
          * no need to explictly reset the err status after the query call */<br>
-       if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&<br>
-           adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {<br>
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&<br>
+           amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {<br>
                 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))<br>
                         dev_warn(adev->dev,<br>
                                  "Failed to reset error counter and error status\n");<br>
@@ -1905,14 +1905,18 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)<br>
                  * should be removed until smu fix handle ecc_info table.<br>
                  */<br>
                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&<br>
-                       (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))<br>
+                   (amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                    IP_VERSION(13, 0, 2)))<br>
                         continue;<br>
 <br>
                 amdgpu_ras_query_error_status(adev, &info);<br>
 <br>
-               if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&<br>
-                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&<br>
-                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {<br>
+               if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=<br>
+                           IP_VERSION(11, 0, 2) &&<br>
+                   amdgpu_ip_version(adev, MP0_HWIP, 0) !=<br>
+                           IP_VERSION(11, 0, 4) &&<br>
+                   amdgpu_ip_version(adev, MP0_HWIP, 0) !=<br>
+                           IP_VERSION(13, 0, 0)) {<br>
                         if (amdgpu_ras_reset_error_status(adev, info.head.block))<br>
                                 dev_warn(adev->dev, "Failed to reset error counter and error status");<br>
                 }<br>
@@ -2400,7 +2404,7 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)<br>
 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)<br>
 {<br>
         if (amdgpu_sriov_vf(adev)) {<br>
-               switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
                 case IP_VERSION(13, 0, 2):<br>
                 case IP_VERSION(13, 0, 6):<br>
                         return true;<br>
@@ -2410,7 +2414,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)<br>
         }<br>
 <br>
         if (adev->asic_type == CHIP_IP_DISCOVERY) {<br>
-               switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
                 case IP_VERSION(13, 0, 0):<br>
                 case IP_VERSION(13, 0, 6):<br>
                 case IP_VERSION(13, 0, 10):<br>
@@ -2484,8 +2488,10 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)<br>
                         /* VCN/JPEG RAS can be supported on both bare metal and<br>
                          * SRIOV environment<br>
                          */<br>
-                       if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||<br>
-                           adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))<br>
+                       if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==<br>
+                                   IP_VERSION(2, 6, 0) ||<br>
+                           amdgpu_ip_version(adev, VCN_HWIP, 0) ==<br>
+                                   IP_VERSION(4, 0, 0))<br>
                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |<br>
                                                         1 << AMDGPU_RAS_BLOCK__JPEG);<br>
                         else<br>
@@ -2519,7 +2525,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)<br>
          * Disable ras feature for aqua vanjaram<br>
          * by default on apu platform.<br>
          */<br>
-       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) &&<br>
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) &&<br>
             adev->gmc.is_app_apu)<br>
                 adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 :<br>
                         adev->ras_hw_enabled & amdgpu_ras_mask;<br>
@@ -2634,7 +2640,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)<br>
         /* initialize nbio ras function ahead of any other<br>
          * ras functions so hardware fatal error interrupt<br>
          * can be enabled as early as possible */<br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(7, 4, 0):<br>
         case IP_VERSION(7, 4, 1):<br>
         case IP_VERSION(7, 4, 4):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c<br>
index 595d5e535aca..8ced4be784e0 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c<br>
@@ -153,7 +153,7 @@<br>
 <br>
 static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */<br>
         case IP_VERSION(11, 0, 7): /* Sienna cichlid */<br>
         case IP_VERSION(13, 0, 0):<br>
@@ -191,7 +191,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,<br>
                 return true;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 2):<br>
                 /* VEGA20 and ARCTURUS */<br>
                 if (adev->asic_type == CHIP_VEGA20)<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c<br>
index 02d874799c16..970bfece775c 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c<br>
@@ -30,7 +30,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev)<br>
 {<br>
         int ret = 0;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 2):<br>
         case IP_VERSION(13, 0, 6):<br>
                 ret = aldebaran_reset_init(adev);<br>
@@ -52,7 +52,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)<br>
 {<br>
         int ret = 0;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 2):<br>
         case IP_VERSION(13, 0, 6):<br>
                 ret = aldebaran_reset_fini(adev);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c<br>
index 572f861e3f70..e8cbc4142d80 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c<br>
@@ -251,8 +251,11 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,<br>
                                 else {<br>
                                         /* Use a single copy per SDMA firmware type. PSP uses the same instance for all<br>
                                          * groups of SDMAs */<br>
-                                       if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2) &&<br>
-                                           adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&<br>
+                                       if (amdgpu_ip_version(adev, SDMA0_HWIP,<br>
+                                                             0) ==<br>
+                                                   IP_VERSION(4, 4, 2) &&<br>
+                                           adev->firmware.load_type ==<br>
+                                                   AMDGPU_FW_LOAD_PSP &&<br>
                                             adev->sdma.num_inst_per_aid == i) {<br>
                                                 break;<br>
                                         }<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c<br>
index 4e51dce3aab5..05991c5c8ddb 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c<br>
@@ -1727,7 +1727,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)<br>
                 reserve_size =<br>
                         amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);<br>
 <br>
-       if (!adev->bios && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))<br>
+       if (!adev->bios &&<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))<br>
                 reserve_size = max(reserve_size, (uint32_t)280 << 20);<br>
         else if (!reserve_size)<br>
                 reserve_size = DISCOVERY_TMR_OFFSET;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c<br>
index 5aff383473f2..771ef8017a98 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c<br>
@@ -1119,7 +1119,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)<br>
 static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)<br>
 {<br>
         if (block_type == MP0_HWIP) {<br>
-               switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
                 case IP_VERSION(9, 0, 0):<br>
                         switch (adev->asic_type) {<br>
                         case CHIP_VEGA10:<br>
@@ -1170,7 +1170,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl<br>
                         return "yellow_carp";<br>
                 }<br>
         } else if (block_type == MP1_HWIP) {<br>
-               switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
                 case IP_VERSION(9, 0, 0):<br>
                 case IP_VERSION(10, 0, 0):<br>
                 case IP_VERSION(10, 0, 1):<br>
@@ -1196,7 +1196,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl<br>
                         return "aldebaran_smc";<br>
                 }<br>
         } else if (block_type == SDMA0_HWIP) {<br>
-               switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
                 case IP_VERSION(4, 0, 0):<br>
                         return "vega10_sdma";<br>
                 case IP_VERSION(4, 0, 1):<br>
@@ -1240,7 +1240,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl<br>
                         return "vangogh_sdma";<br>
                 }<br>
         } else if (block_type == UVD_HWIP) {<br>
-               switch (adev->ip_versions[UVD_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {<br>
                 case IP_VERSION(1, 0, 0):<br>
                 case IP_VERSION(1, 0, 1):<br>
                         if (adev->apu_flags & AMD_APU_IS_RAVEN2)<br>
@@ -1265,7 +1265,8 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl<br>
                 case IP_VERSION(3, 0, 0):<br>
                 case IP_VERSION(3, 0, 64):<br>
                 case IP_VERSION(3, 0, 192):<br>
-                       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))<br>
+                       if (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                           IP_VERSION(10, 3, 0))<br>
                                 return "sienna_cichlid_vcn";<br>
                         return "navy_flounder_vcn";<br>
                 case IP_VERSION(3, 0, 2):<br>
@@ -1278,7 +1279,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl<br>
                         return "yellow_carp_vcn";<br>
                 }<br>
         } else if (block_type == GC_HWIP) {<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 case IP_VERSION(9, 0, 1):<br>
                         return "vega10";<br>
                 case IP_VERSION(9, 2, 1):<br>
@@ -1331,7 +1332,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,<br>
         int maj, min, rev;<br>
         char *ip_name;<br>
         const char *legacy;<br>
-       uint32_t version = adev->ip_versions[block_type][0];<br>
+       uint32_t version = amdgpu_ip_version(adev, block_type, 0);<br>
 <br>
         legacy = amdgpu_ucode_legacy_naming(adev, block_type);<br>
         if (legacy) {<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c<br>
index db0d94ca4ffc..24fcc9a2e422 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c<br>
@@ -28,7 +28,7 @@ static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,<br>
                                     struct ras_err_data *err_data, uint64_t err_addr,<br>
                                     uint32_t ch_inst, uint32_t umc_inst)<br>
 {<br>
-       switch (adev->ip_versions[UMC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {<br>
         case IP_VERSION(6, 7, 0):<br>
                 umc_v6_7_convert_error_address(adev,<br>
                                 err_data, err_addr, ch_inst, umc_inst);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c<br>
index 9da80b54d63e..aeff9926412f 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c<br>
@@ -581,7 +581,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)<br>
         const char *fw_name = NULL;<br>
         int r;<br>
 <br>
-       switch (adev->ip_versions[VCN_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 5):<br>
                 fw_name = "amdgpu/umsch_mm_4_0_0.bin";<br>
                 break;<br>
@@ -758,7 +758,7 @@ static int umsch_mm_early_init(void *handle)<br>
 {<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
 <br>
-       switch (adev->ip_versions[VCN_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 5):<br>
                 umsch_mm_v4_0_set_funcs(&adev->umsch_mm);<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c<br>
index 80bcbe744e58..c93f3a4c0e31 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c<br>
@@ -126,7 +126,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)<br>
          * Hence, check for these versions here - notice this is<br>
          * restricted to Vangogh (Deck's APU).<br>
          */<br>
-       if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {<br>
+       if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {<br>
                 const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);<br>
 <br>
                 if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||<br>
@@ -171,7 +171,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)<br>
         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)<br>
                 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);<br>
 <br>
-       if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {<br>
+       if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {<br>
                 fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));<br>
                 log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);<br>
         } else {<br>
@@ -267,7 +267,7 @@ static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)<br>
         struct amdgpu_device *adev = ring->adev;<br>
         bool ret = false;<br>
 <br>
-       if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))<br>
+       if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))<br>
                 ret = true;<br>
 <br>
         return ret;<br>
@@ -998,7 +998,7 @@ int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)<br>
         struct amdgpu_device *adev = ring->adev;<br>
         long r;<br>
 <br>
-       if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(4, 0, 3)) {<br>
+       if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {<br>
                 r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);<br>
                 if (r)<br>
                         goto error;<br>
@@ -1048,7 +1048,8 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)<br>
                         adev->firmware.fw_size +=<br>
                                 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);<br>
 <br>
-                       if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(4, 0, 3))<br>
+                       if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==<br>
+                           IP_VERSION(4, 0, 3))<br>
                                 break;<br>
                 }<br>
                 dev_info(adev->dev, "Will use PSP to load VCN firmware\n");<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c<br>
index 96857ae7fb5b..a0aa624f5a92 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c<br>
@@ -837,7 +837,7 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad<br>
 <br>
 void amdgpu_virt_post_reset(struct amdgpu_device *adev)<br>
 {<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {<br>
                 /* force set to GFXOFF state after reset,<br>
                  * to avoid some invalid operation before GC enable<br>
                  */<br>
@@ -847,7 +847,7 @@ void amdgpu_virt_post_reset(struct amdgpu_device *adev)<br>
 <br>
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)<br>
 {<br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 0):<br>
                 /* no vf autoload, white list */<br>
                 if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c<br>
index edaebabc8e60..a874aed2ab52 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c<br>
@@ -802,12 +802,12 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,<br>
          * heavy-weight flush TLB unconditionally.<br>
          */<br>
         flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&<br>
-                    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);<br>
+                    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);<br>
 <br>
         /*<br>
          * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB<br>
          */<br>
-       flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);<br>
+       flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);<br>
 <br>
         memset(&params, 0, sizeof(params));<br>
         params.adev = adev;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c<br>
index ae070072705a..a68354d03a49 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c<br>
@@ -123,7 +123,7 @@ static int vpe_early_init(void *handle)<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
         struct amdgpu_vpe *vpe = &adev->vpe;<br>
 <br>
-       switch (adev->ip_versions[VPE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {<br>
         case IP_VERSION(6, 1, 0):<br>
                 vpe_v6_1_set_funcs(vpe);<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c<br>
index 7e91b24784e5..061534e845a7 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c<br>
@@ -948,7 +948,8 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,<br>
         uint32_t field_array_size = 0;<br>
 <br>
         if (is_xgmi_pcs) {<br>
-               if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {<br>
+               if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==<br>
+                   IP_VERSION(6, 1, 0)) {<br>
                         pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];<br>
                         field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);<br>
                 } else {<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c<br>
index a13c443ea10f..42f4e163e251 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c<br>
@@ -68,7 +68,7 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[ATHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
         case IP_VERSION(9, 1, 0):<br>
         case IP_VERSION(9, 2, 0):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c<br>
index a9521c98e7f7..5a122f50a6e7 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c<br>
@@ -77,7 +77,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[ATHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {<br>
         case IP_VERSION(1, 3, 1):<br>
         case IP_VERSION(2, 0, 0):<br>
         case IP_VERSION(2, 0, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c<br>
index 78508ae6a670..e143fcc46148 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c<br>
@@ -70,7 +70,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[ATHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
         case IP_VERSION(2, 1, 1):<br>
         case IP_VERSION(2, 1, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c<br>
index f0e235f98afb..5a318bc03d23 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c<br>
@@ -36,7 +36,7 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)<br>
 {<br>
         uint32_t data;<br>
 <br>
-       switch (adev->ip_versions[ATHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 1):<br>
                 data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);<br>
                 break;<br>
@@ -49,7 +49,7 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)<br>
 <br>
 static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)<br>
 {<br>
-       switch (adev->ip_versions[ATHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 1):<br>
                 WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);<br>
                 break;<br>
@@ -99,7 +99,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[ATHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 0):<br>
         case IP_VERSION(3, 0, 1):<br>
         case IP_VERSION(3, 0, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c<br>
index 62329a822022..35357364b5b3 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c<br>
@@ -3627,7 +3627,7 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
                 soc15_program_register_sequence(adev,<br>
                                                 golden_settings_gc_rlc_spm_10_0_nv10,<br>
@@ -3650,7 +3650,7 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)<br>
 <br>
 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
                 soc15_program_register_sequence(adev,<br>
                                                 golden_settings_gc_10_1,<br>
@@ -3891,7 +3891,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)<br>
 {<br>
         adev->gfx.cp_fw_write_wait = false;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 2):<br>
         case IP_VERSION(10, 1, 1):<br>
@@ -3942,7 +3942,7 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)<br>
 <br>
 static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
                 if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))<br>
                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;<br>
@@ -3964,8 +3964,8 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)<br>
 <br>
         DRM_DEBUG("\n");<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) &&<br>
-          (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 1) &&<br>
+           (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)))<br>
                 wks = "_wks";<br>
         amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));<br>
 <br>
@@ -4144,7 +4144,7 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)<br>
         reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);<br>
         reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);<br>
         reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
                 reg_access_ctrl->spare_int =<br>
                         SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid);<br>
@@ -4358,7 +4358,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)<br>
 {<br>
         u32 gb_addr_config;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 2):<br>
@@ -4491,7 +4491,7 @@ static int gfx_v10_0_sw_init(void *handle)<br>
         struct amdgpu_kiq *kiq;<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 2):<br>
@@ -4749,9 +4749,12 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)<br>
         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {<br>
                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {<br>
                         bitmap = i * adev->gfx.config.max_sh_per_se + j;<br>
-                       if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||<br>
-                               (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) ||<br>
-                               (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) &&<br>
+                       if (((amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                             IP_VERSION(10, 3, 0)) ||<br>
+                            (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                             IP_VERSION(10, 3, 3)) ||<br>
+                            (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                             IP_VERSION(10, 3, 6))) &&<br>
                             ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))<br>
                                 continue;<br>
                         gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0);<br>
@@ -4779,7 +4782,7 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade<br>
         /* for ASICs that integrate GFX v10.3<br>
          * pa_sc_tile_steering_override should be set to 0<br>
          */<br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))<br>
                 return 0;<br>
 <br>
         /* init num_sc */<br>
@@ -4960,7 +4963,7 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)<br>
         /* TCCs are global (not instanced). */<br>
         uint32_t tcc_disable;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) {<br>
                 tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |<br>
                               RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);<br>
         } else {<br>
@@ -5037,7 +5040,7 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)<br>
         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);<br>
 <br>
         /* csib */<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) {<br>
                 WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,<br>
                                 adev->gfx.rlc.clear_state_gpu_addr >> 32);<br>
                 WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,<br>
@@ -5666,7 +5669,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)<br>
         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);<br>
         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2))<br>
                 WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);<br>
         else<br>
                 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);<br>
@@ -6057,7 +6060,7 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,<br>
                 }<br>
                 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);<br>
         }<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
@@ -6190,7 +6193,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)<br>
 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)<br>
 {<br>
         if (enable) {<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 case IP_VERSION(10, 3, 0):<br>
                 case IP_VERSION(10, 3, 2):<br>
                 case IP_VERSION(10, 3, 1):<br>
@@ -6206,7 +6209,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)<br>
                         break;<br>
                 }<br>
         } else {<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 case IP_VERSION(10, 3, 0):<br>
                 case IP_VERSION(10, 3, 2):<br>
                 case IP_VERSION(10, 3, 1):<br>
@@ -6306,7 +6309,7 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)<br>
         struct amdgpu_device *adev = ring->adev;<br>
 <br>
         /* tell RLC which is KIQ queue */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
@@ -6917,7 +6920,7 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)<br>
          * check if mmVGT_ESGS_RING_SIZE_UMD<br>
          * has been remapped to mmVGT_ESGS_RING_SIZE<br>
          */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 4):<br>
@@ -6966,7 +6969,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)<br>
          */<br>
         WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
@@ -7139,19 +7142,19 @@ static int gfx_v10_0_hw_init(void *handle)<br>
          * init golden registers and rlc resume may override some registers,<br>
          * reconfig them here<br>
          */<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 10) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 1) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2))<br>
                 gfx_v10_0_tcp_harvest(adev);<br>
 <br>
         r = gfx_v10_0_cp_resume(adev);<br>
         if (r)<br>
                 return r;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))<br>
                 gfx_v10_3_program_pbb_mode(adev);<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))<br>
                 gfx_v10_3_set_power_brake_sequence(adev);<br>
 <br>
         return r;<br>
@@ -7255,7 +7258,7 @@ static int gfx_v10_0_soft_reset(void *handle)<br>
 <br>
         /* GRBM_STATUS2 */<br>
         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
@@ -7312,7 +7315,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)<br>
 {<br>
         uint64_t clock, clock_lo, clock_hi, hi_check;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 1):<br>
         case IP_VERSION(10, 3, 3):<br>
         case IP_VERSION(10, 3, 7):<br>
@@ -7399,7 +7402,7 @@ static int gfx_v10_0_early_init(void *handle)<br>
 <br>
         adev->gfx.funcs = &gfx_v10_0_gfx_funcs;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 2):<br>
@@ -7470,7 +7473,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)<br>
         data = RLC_SAFE_MODE__CMD_MASK;<br>
         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
@@ -7508,7 +7511,7 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)<br>
         uint32_t data;<br>
 <br>
         data = RLC_SAFE_MODE__CMD_MASK;<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
@@ -7819,7 +7822,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d<br>
                 mmCGTS_SA1_QUAD1_SM_CTRL_REG<br>
         };<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) {<br>
                 for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) {<br>
                         reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +<br>
                                   tcp_ctrl_regs_nv12[i];<br>
@@ -7864,9 +7867,12 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,<br>
                 /* ===  CGCG + CGLS === */<br>
                 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);<br>
 <br>
-               if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) ||<br>
-                   (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) ||<br>
-                   (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)))<br>
+               if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                    IP_VERSION(10, 1, 10)) ||<br>
+                   (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                    IP_VERSION(10, 1, 1)) ||<br>
+                   (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                    IP_VERSION(10, 1, 2)))<br>
                         gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev);<br>
         } else {<br>
                 /* CGCG/CGLS should be disabled before MGCG/MGLS<br>
@@ -7966,7 +7972,7 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)<br>
          * Power/performance team will optimize it and might give a new value later.<br>
          */<br>
         if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 case IP_VERSION(10, 3, 1):<br>
                 case IP_VERSION(10, 3, 3):<br>
                 case IP_VERSION(10, 3, 6):<br>
@@ -8027,7 +8033,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 2):<br>
@@ -8064,7 +8070,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 2):<br>
@@ -9311,7 +9317,7 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 3):<br>
@@ -9428,10 +9434,14 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,<br>
         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {<br>
                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {<br>
                         bitmap = i * adev->gfx.config.max_sh_per_se + j;<br>
-                       if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||<br>
-                            (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) ||<br>
-                            (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6)) ||<br>
-                            (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 7))) &&<br>
+                       if (((amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                             IP_VERSION(10, 3, 0)) ||<br>
+                            (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                             IP_VERSION(10, 3, 3)) ||<br>
+                            (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                             IP_VERSION(10, 3, 6)) ||<br>
+                            (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                             IP_VERSION(10, 3, 7))) &&<br>
                             ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))<br>
                                 continue;<br>
                         mask = 1;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c<br>
index 39c434ca0dad..42fc0cc13fdd 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c<br>
@@ -288,7 +288,7 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 1):<br>
         case IP_VERSION(11, 0, 4):<br>
                 soc15_program_register_sequence(adev,<br>
@@ -493,7 +493,7 @@ static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *<br>
 <br>
 static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 2):<br>
         case IP_VERSION(11, 0, 3):<br>
@@ -884,8 +884,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {<br>
 <br>
 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)<br>
 {<br>
-<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 2):<br>
                 adev->gfx.config.max_hw_contexts = 8;<br>
@@ -1332,7 +1331,7 @@ static int gfx_v11_0_sw_init(void *handle)<br>
 <br>
         adev->gfxhub.funcs->init(adev);<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 2):<br>
         case IP_VERSION(11, 0, 3):<br>
@@ -1364,8 +1363,8 @@ static int gfx_v11_0_sw_init(void *handle)<br>
         }<br>
 <br>
         /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&<br>
-               amdgpu_sriov_is_pp_one_vf(adev))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&<br>
+           amdgpu_sriov_is_pp_one_vf(adev))<br>
                 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;<br>
 <br>
         /* EOP Event */<br>
@@ -2592,9 +2591,11 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)<br>
         for (i = 0; i < adev->usec_timeout; i++) {<br>
                 cp_status = RREG32_SOC15(GC, 0, regCP_STAT);<br>
 <br>
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) ||<br>
-                               adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4) ||<br>
-                               adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 5, 0))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                           IP_VERSION(11, 0, 1) ||<br>
+                   amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                           IP_VERSION(11, 0, 4) ||<br>
+                   amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0))<br>
                         bootload_status = RREG32_SOC15(GC, 0,<br>
                                         regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);<br>
                 else<br>
@@ -5025,7 +5026,7 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)<br>
 <br>
         // Program RLC_PG_DELAY3 for CGPG hysteresis<br>
         if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 case IP_VERSION(11, 0, 1):<br>
                 case IP_VERSION(11, 0, 4):<br>
                         WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);<br>
@@ -5054,7 +5055,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 2):<br>
         case IP_VERSION(11, 0, 3):<br>
@@ -5086,7 +5087,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 1):<br>
         case IP_VERSION(11, 0, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c<br>
index f99a3a6bfd91..e3ff6e46f3f7 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c<br>
@@ -895,7 +895,7 @@ static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
                 soc15_program_register_sequence(adev,<br>
                                                 golden_settings_gc_9_0,<br>
@@ -951,8 +951,8 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&<br>
-           (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)))<br>
+       if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&<br>
+           (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)))<br>
                 soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,<br>
                                                 (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));<br>
 }<br>
@@ -1095,14 +1095,14 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)<br>
         adev->gfx.me_fw_write_wait = false;<br>
         adev->gfx.mec_fw_write_wait = false;<br>
 <br>
-       if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&<br>
+       if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&<br>
             ((adev->gfx.mec_fw_version < 0x000001a5) ||<br>
-           (adev->gfx.mec_feature_version < 46) ||<br>
-           (adev->gfx.pfp_fw_version < 0x000000b7) ||<br>
-           (adev->gfx.pfp_feature_version < 46)))<br>
+            (adev->gfx.mec_feature_version < 46) ||<br>
+            (adev->gfx.pfp_fw_version < 0x000000b7) ||<br>
+            (adev->gfx.pfp_feature_version < 46)))<br>
                 DRM_WARN_ONCE("CP firmware version too old, please update!");<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&<br>
                     (adev->gfx.me_feature_version >= 42) &&<br>
@@ -1202,7 +1202,7 @@ static bool is_raven_kicker(struct amdgpu_device *adev)<br>
 <br>
 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)<br>
 {<br>
-       if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) &&<br>
+       if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) &&<br>
             (adev->gfx.me_fw_version >= 0x000000a5) &&<br>
             (adev->gfx.me_feature_version >= 52))<br>
                 return true;<br>
@@ -1215,7 +1215,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)<br>
         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))<br>
                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 2, 1):<br>
         case IP_VERSION(9, 4, 0):<br>
@@ -1326,9 +1326,9 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,<br>
 <br>
 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)<br>
 {<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0))<br>
                 return false;<br>
 <br>
         return true;<br>
@@ -1485,7 +1485,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)<br>
 <br>
         if (adev->flags & AMD_IS_APU)<br>
                 always_on_cu_num = 4;<br>
-       else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1))<br>
+       else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1))<br>
                 always_on_cu_num = 8;<br>
         else<br>
                 always_on_cu_num = 12;<br>
@@ -1836,7 +1836,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)<br>
         u32 gb_addr_config;<br>
         int err;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
                 adev->gfx.config.max_hw_contexts = 8;<br>
                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;<br>
@@ -2002,7 +2002,7 @@ static int gfx_v9_0_sw_init(void *handle)<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
         unsigned int hw_prio;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 2, 1):<br>
         case IP_VERSION(9, 4, 0):<br>
@@ -2363,7 +2363,7 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)<br>
 {<br>
         uint32_t tmp;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 1):<br>
                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);<br>
                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,<br>
@@ -2700,7 +2700,7 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)<br>
                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */<br>
                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);<br>
                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);<br>
-               if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0))<br>
                         pwr_10_0_gfxip_control_over_cgpg(adev, true);<br>
         }<br>
 }<br>
@@ -2812,7 +2812,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)<br>
          * And it's needed by gfxoff feature.<br>
          */<br>
         if (adev->gfx.rlc.is_rlc_v2_1) {<br>
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) ||<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                           IP_VERSION(9, 2, 1) ||<br>
                     (adev->apu_flags & AMD_APU_IS_RAVEN2))<br>
                         gfx_v9_1_init_rlc_save_restore_list(adev);<br>
                 gfx_v9_0_enable_save_restore_machine(adev);<br>
@@ -2925,7 +2926,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)<br>
                         return r;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 2, 2):<br>
         case IP_VERSION(9, 1, 0):<br>
                 gfx_v9_0_init_lbpw(adev);<br>
@@ -3713,8 +3714,8 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)<br>
 {<br>
         u32 tmp;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) &&<br>
-           adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) &&<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))<br>
                 return;<br>
 <br>
         tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);<br>
@@ -3754,7 +3755,7 @@ static int gfx_v9_0_hw_init(void *handle)<br>
         if (r)<br>
                 return r;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))<br>
                 gfx_v9_4_2_set_power_brake_sequence(adev);<br>
 <br>
         return r;<br>
@@ -3802,7 +3803,7 @@ static int gfx_v9_0_hw_fini(void *handle)<br>
 <br>
         /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */<br>
         if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||<br>
-           (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {<br>
+           (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) {<br>
                 dev_dbg(adev->dev, "Skipping RLC halt\n");<br>
                 return 0;<br>
         }<br>
@@ -3986,7 +3987,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)<br>
 {<br>
         uint64_t clock, clock_lo, clock_hi, hi_check;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 3, 0):<br>
                 preempt_disable();<br>
                 clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);<br>
@@ -4005,7 +4006,9 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)<br>
         default:<br>
                 amdgpu_gfx_off_ctrl(adev, false);<br>
                 mutex_lock(&adev->gfx.gpu_clock_mutex);<br>
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                           IP_VERSION(9, 0, 1) &&<br>
+                   amdgpu_sriov_runtime(adev)) {<br>
                         clock = gfx_v9_0_kiq_read_clock(adev);<br>
                 } else {<br>
                         WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);<br>
@@ -4357,7 +4360,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)<br>
         if (!ring->sched.ready)<br>
                 return 0;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {<br>
                 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;<br>
                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);<br>
                 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;<br>
@@ -4509,8 +4512,8 @@ static int gfx_v9_0_early_init(void *handle)<br>
 <br>
         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))<br>
                 adev->gfx.num_gfx_rings = 0;<br>
         else<br>
                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;<br>
@@ -4548,7 +4551,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)<br>
         }<br>
 <br>
         /* requires IBs so do in late init after IB pool is initialized */<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))<br>
                 r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);<br>
         else<br>
                 r = gfx_v9_0_do_edc_gpr_workarounds(adev);<br>
@@ -4580,7 +4583,7 @@ static int gfx_v9_0_late_init(void *handle)<br>
         if (r)<br>
                 return r;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))<br>
                 gfx_v9_4_2_debug_trap_config_init(adev,<br>
                         adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);<br>
         else<br>
@@ -4676,7 +4679,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev<br>
                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */<br>
                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);<br>
 <br>
-               if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))<br>
                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;<br>
 <br>
                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |<br>
@@ -4710,7 +4713,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev<br>
                 /* 1 - MGCG_OVERRIDE */<br>
                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);<br>
 <br>
-               if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))<br>
                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;<br>
 <br>
                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |<br>
@@ -4816,7 +4819,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev<br>
                 /* enable cgcg FSM(0x0000363F) */<br>
                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);<br>
 <br>
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))<br>
                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |<br>
                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;<br>
                 else<br>
@@ -4951,7 +4954,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
         bool enable = (state == AMD_PG_STATE_GATE);<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 2, 2):<br>
         case IP_VERSION(9, 1, 0):<br>
         case IP_VERSION(9, 3, 0):<br>
@@ -4998,7 +5001,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 2, 1):<br>
         case IP_VERSION(9, 4, 0):<br>
@@ -5048,7 +5051,7 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u64 *flags)<br>
         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)<br>
                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) {<br>
                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */<br>
                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));<br>
                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)<br>
@@ -7087,7 +7090,7 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 2, 1):<br>
         case IP_VERSION(9, 4, 0):<br>
@@ -7106,7 +7109,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)<br>
 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)<br>
 {<br>
         /* init asci gds info */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 2, 1):<br>
         case IP_VERSION(9, 4, 0):<br>
@@ -7128,7 +7131,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
         case IP_VERSION(9, 4, 0):<br>
                 adev->gds.gds_compute_max_wave_id = 0x7ff;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c<br>
index 32a740104868..fbfe0a1c4b19 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c<br>
@@ -682,7 +682,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)<br>
         adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;<br>
         adev->gfx.ras = &gfx_v9_4_3_ras;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 3):<br>
                 adev->gfx.config.max_hw_contexts = 8;<br>
                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;<br>
@@ -2430,7 +2430,7 @@ static int gfx_v9_4_3_set_clockgating_state(void *handle,<br>
                 return 0;<br>
 <br>
         num_xcc = NUM_XCC(adev->gfx.xcc_mask);<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 3):<br>
                 for (i = 0; i < num_xcc; i++)<br>
                         gfx_v9_4_3_xcc_update_gfx_clock_gating(<br>
@@ -4231,7 +4231,7 @@ static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)<br>
 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)<br>
 {<br>
         /* init asci gds info */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 3):<br>
                 /* 9.4.3 removed all the GDS internal memory,<br>
                  * only support GWS opcode in kernel, like barrier<br>
@@ -4243,7 +4243,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 3):<br>
                 /* deprecated for 9.4.3, no usage at all */<br>
                 adev->gds.gds_compute_max_wave_id = 0;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c<br>
index 0834af771549..ff60670b8464 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c<br>
@@ -356,11 +356,14 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,<br>
                          * the SQ per-process.<br>
                          * Retry faults need to be enabled for that to work.<br>
                          */<br>
-                       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,<br>
-                                           RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,<br>
-                                           !adev->gmc.noretry ||<br>
-                                           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||<br>
-                                           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3));<br>
+                       tmp = REG_SET_FIELD(<br>
+                               tmp, VM_CONTEXT1_CNTL,<br>
+                               RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,<br>
+                               !adev->gmc.noretry ||<br>
+                                       amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                                               IP_VERSION(9, 4, 2) ||<br>
+                                       amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                                               IP_VERSION(9, 4, 3));<br>
                         WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,<br>
                                             i * hub->ctx_distance, tmp);<br>
                         WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c<br>
index 7708d5ded7b8..f829c441640a 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c<br>
@@ -510,7 +510,7 @@ static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)<br>
         u32 max_num_physical_nodes   = 0;<br>
         u32 max_physical_node_id     = 0;<br>
 <br>
-       switch (adev->ip_versions[XGMI_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {<br>
         case IP_VERSION(4, 8, 0):<br>
                 max_num_physical_nodes   = 4;<br>
                 max_physical_node_id     = 3;<br>
@@ -548,7 +548,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)<br>
                 adev->gfx.config.max_sh_per_se *<br>
                 adev->gfx.config.max_shader_engines);<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 1):<br>
         case IP_VERSION(10, 3, 3):<br>
                 /* Get SA disabled bitmap from eFuse setting */<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c<br>
index fa87a85e1017..d3da13f4c80e 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c<br>
@@ -145,7 +145,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,<br>
                  * the new fast GRBM interface.<br>
                  */<br>
                 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&<br>
-                   (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))<br>
+                   (amdgpu_ip_version(adev, GC_HWIP, 0) <<br>
+                    IP_VERSION(10, 3, 0)))<br>
                         RREG32(hub->vm_l2_pro_fault_status);<br>
 <br>
                 status = RREG32(hub->vm_l2_pro_fault_status);<br>
@@ -278,7 +279,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,<br>
          * to avoid a false ACK due to the new fast GRBM interface.<br>
          */<br>
         if ((vmhub == AMDGPU_GFXHUB(0)) &&<br>
-           (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))<br>
+           (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0)))<br>
                 RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +<br>
                                   hub->eng_distance * eng, hub_ip);<br>
 <br>
@@ -680,7 +681,7 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[UMC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {<br>
         case IP_VERSION(8, 7, 0):<br>
                 adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;<br>
                 adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;<br>
@@ -697,7 +698,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 3, 0):<br>
         case IP_VERSION(2, 4, 0):<br>
         case IP_VERSION(2, 4, 1):<br>
@@ -711,7 +712,7 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
@@ -825,7 +826,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)<br>
 <br>
         /* set the gart size */<br>
         if (amdgpu_gart_size == -1) {<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 default:<br>
                         adev->gmc.gart_size = 512ULL << 20;<br>
                         break;<br>
@@ -892,7 +893,7 @@ static int gmc_v10_0_sw_init(void *handle)<br>
                 adev->gmc.vram_vendor = vram_vendor;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
                 adev->gmc.mall_size = 128 * 1024 * 1024;<br>
                 break;<br>
@@ -910,7 +911,7 @@ static int gmc_v10_0_sw_init(void *handle)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
         case IP_VERSION(10, 1, 1):<br>
         case IP_VERSION(10, 1, 2):<br>
@@ -1195,7 +1196,8 @@ static int gmc_v10_0_set_clockgating_state(void *handle,<br>
          * is a new problem observed at DF 3.0.3, however with the same suspend sequence not<br>
          * seen any issue on the DF 3.0.2 series platform.<br>
          */<br>
-       if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {<br>
+       if (adev->in_s0ix &&<br>
+           amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) {<br>
                 dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");<br>
                 return 0;<br>
         }<br>
@@ -1204,7 +1206,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,<br>
         if (r)<br>
                 return r;<br>
 <br>
-       if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))<br>
+       if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))<br>
                 return athub_v2_1_set_clockgating(adev, state);<br>
         else<br>
                 return athub_v2_0_set_clockgating(adev, state);<br>
@@ -1214,13 +1216,13 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)<br>
 {<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))<br>
                 return;<br>
 <br>
         adev->mmhub.funcs->get_clockgating(adev, flags);<br>
 <br>
-       if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))<br>
+       if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))<br>
                 athub_v2_1_get_clockgating(adev, flags);<br>
         else<br>
                 athub_v2_0_get_clockgating(adev, flags);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c<br>
index 671e288c7575..e1f47f9c1881 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c<br>
@@ -588,7 +588,7 @@ static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[UMC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {<br>
         case IP_VERSION(8, 10, 0):<br>
                 adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;<br>
                 adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;<br>
@@ -611,7 +611,7 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 1):<br>
                 adev->mmhub.funcs = &mmhub_v3_0_1_funcs;<br>
                 break;<br>
@@ -629,7 +629,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 3):<br>
                 adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;<br>
                 break;<br>
@@ -782,7 +782,7 @@ static int gmc_v11_0_sw_init(void *handle)<br>
         adev->gmc.vram_type = vram_type;<br>
         adev->gmc.vram_vendor = vram_vendor;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 1):<br>
         case IP_VERSION(11, 0, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c<br>
index 3d13d0bba7b1..268ee533e7c1 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c<br>
@@ -640,7 +640,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,<br>
                 addr, entry->client_id,<br>
                 soc15_ih_clientid_name[entry->client_id]);<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))<br>
                 dev_err(adev->dev, "  cookie node_id %d fault from die %s%d%s\n",<br>
                         node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,<br>
                         node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");<br>
@@ -654,7 +654,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,<br>
          * the new fast GRBM interface.<br>
          */<br>
         if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&<br>
-           (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))<br>
+           (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))<br>
                 RREG32(hub->vm_l2_pro_fault_status);<br>
 <br>
         status = RREG32(hub->vm_l2_pro_fault_status);<br>
@@ -671,7 +671,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,<br>
                         gfxhub_client_ids[cid],<br>
                         cid);<br>
         } else {<br>
-               switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
                 case IP_VERSION(9, 0, 0):<br>
                         mmhub_cid = mmhub_client_ids_vega10[cid][rw];<br>
                         break;<br>
@@ -772,8 +772,8 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,<br>
 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,<br>
                                        uint32_t vmhub)<br>
 {<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))<br>
                 return false;<br>
 <br>
         return ((vmhub == AMDGPU_MMHUB0(0) ||<br>
@@ -824,7 +824,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,<br>
 <br>
         hub = &adev->vmhub[vmhub];<br>
         if (adev->gmc.xgmi.num_physical_nodes &&<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0)) {<br>
                 /* Vega20+XGMI caches PTEs in TC and TLB. Add a<br>
                  * heavy-weight TLB flush (type 2), which flushes<br>
                  * both. Due to a race condition with concurrent<br>
@@ -834,7 +834,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,<br>
                 inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);<br>
                 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);<br>
         } else if (flush_type == 2 &&<br>
-                  adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&<br>
+                  amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&<br>
                    adev->rev_id == 0) {<br>
                 inv_req = gmc_v9_0_get_invalidate_req(vmid, 0);<br>
                 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);<br>
@@ -896,7 +896,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,<br>
                  * GRBM interface.<br>
                  */<br>
                 if ((vmhub == AMDGPU_GFXHUB(0)) &&<br>
-                   (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))<br>
+                   (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))<br>
                         RREG32_NO_KIQ(hub->vm_inv_eng0_req +<br>
                                       hub->eng_distance * eng);<br>
 <br>
@@ -969,7 +969,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,<br>
                  * still need a second TLB flush after this.<br>
                  */<br>
                 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&<br>
-                                      adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));<br>
+                                      amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                                              IP_VERSION(9, 4, 0));<br>
                 /* 2 dwords flush + 8 dwords fence */<br>
                 unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;<br>
 <br>
@@ -984,7 +985,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,<br>
                                                       pasid, 2, all_hub);<br>
 <br>
                 if (flush_type == 2 &&<br>
-                   adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&<br>
+                   amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                           IP_VERSION(9, 4, 3) &&<br>
                     adev->rev_id == 0)<br>
                         kiq->pmf->kiq_invalidate_tlbs(ring,<br>
                                                 pasid, 0, all_hub);<br>
@@ -1192,7 +1194,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,<br>
         bool snoop = false;<br>
         bool is_local;<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 1):<br>
         case IP_VERSION(9, 4, 2):<br>
                 if (is_vram) {<br>
@@ -1206,8 +1208,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,<br>
                                 /* FIXME: is this still needed? Or does<br>
                                  * amdgpu_ttm_tt_pde_flags already handle this?<br>
                                  */<br>
-                               if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||<br>
-                                    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) &&<br>
+                               if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                                            IP_VERSION(9, 4, 2) ||<br>
+                                    amdgpu_ip_version(adev, GC_HWIP, 0) ==<br>
+                                            IP_VERSION(9, 4, 3)) &&<br>
                                     adev->gmc.xgmi.connected_to_cpu)<br>
                                         snoop = true;<br>
                         } else {<br>
@@ -1316,7 +1320,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,<br>
         /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system<br>
          * memory can use more efficient MTYPEs.<br>
          */<br>
-       if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))<br>
                 return;<br>
 <br>
         /* Only direct-mapped memory allows us to determine the NUMA node from<br>
@@ -1385,7 +1389,7 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)<br>
         } else {<br>
                 u32 viewport;<br>
 <br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(1, 0, 0):<br>
                 case IP_VERSION(1, 0, 1):<br>
                         viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);<br>
@@ -1456,7 +1460,7 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[UMC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {<br>
         case IP_VERSION(6, 0, 0):<br>
                 adev->umc.funcs = &umc_v6_0_funcs;<br>
                 break;<br>
@@ -1510,7 +1514,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 1):<br>
                 adev->mmhub.funcs = &mmhub_v9_4_funcs;<br>
                 break;<br>
@@ -1528,7 +1532,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 0):<br>
                 adev->mmhub.ras = &mmhub_v1_0_ras;<br>
                 break;<br>
@@ -1549,7 +1553,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)<br>
 <br>
 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))<br>
                 adev->gfxhub.funcs = &gfxhub_v1_2_funcs;<br>
         else<br>
                 adev->gfxhub.funcs = &gfxhub_v1_0_funcs;<br>
@@ -1565,7 +1569,7 @@ static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)<br>
         struct amdgpu_mca *mca = &adev->mca;<br>
 <br>
         /* is UMC the right IP to check for MCA?  Maybe DF? */<br>
-       switch (adev->ip_versions[UMC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {<br>
         case IP_VERSION(6, 7, 0):<br>
                 if (!adev->gmc.xgmi.connected_to_cpu) {<br>
                         mca->mp0.ras = &mca_v3_0_mp0_ras;<br>
@@ -1592,18 +1596,18 @@ static int gmc_v9_0_early_init(void *handle)<br>
          * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined<br>
          * in their IP discovery tables<br>
          */<br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||<br>
-           adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||<br>
+           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))<br>
                 adev->gmc.xgmi.supported = true;<br>
 <br>
-       if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {<br>
+       if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {<br>
                 adev->gmc.xgmi.supported = true;<br>
                 adev->gmc.xgmi.connected_to_cpu =<br>
                         adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);<br>
         }<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {<br>
                 enum amdgpu_pkg_type pkg_type =<br>
                         adev->smuio.funcs->get_pkg_type(adev);<br>
                 /* On GFXIP 9.4.3. APU, there is no physical VRAM domain present<br>
@@ -1652,7 +1656,7 @@ static int gmc_v9_0_late_init(void *handle)<br>
          * writes, while disables HBM ECC for vega10.<br>
          */<br>
         if (!amdgpu_sriov_vf(adev) &&<br>
-           (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {<br>
+           (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {<br>
                 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {<br>
                         if (adev->df.funcs &&<br>
                             adev->df.funcs->enable_ecc_force_par_wr_rmw)<br>
@@ -1760,7 +1764,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)<br>
 <br>
         /* set the gart size */<br>
         if (amdgpu_gart_size == -1) {<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */<br>
                 case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */<br>
                 case IP_VERSION(9, 4, 0):<br>
@@ -1839,8 +1843,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)<br>
  */<br>
 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)<br>
 {<br>
-       if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||<br>
-           (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))<br>
+       if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||<br>
+           (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))<br>
                 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);<br>
 }<br>
 <br>
@@ -2035,7 +2039,7 @@ static int gmc_v9_0_sw_init(void *handle)<br>
 <br>
         spin_lock_init(&adev->gmc.invalidate_lock);<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {<br>
                 gmc_v9_4_3_init_vram_info(adev);<br>
         } else if (!adev->bios) {<br>
                 if (adev->flags & AMD_IS_APU) {<br>
@@ -2075,7 +2079,7 @@ static int gmc_v9_0_sw_init(void *handle)<br>
                 adev->gmc.vram_type = vram_type;<br>
                 adev->gmc.vram_vendor = vram_vendor;<br>
         }<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 1, 0):<br>
         case IP_VERSION(9, 2, 2):<br>
                 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);<br>
@@ -2108,7 +2112,7 @@ static int gmc_v9_0_sw_init(void *handle)<br>
                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);<br>
                 else<br>
                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);<br>
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))<br>
                         adev->gmc.translate_further = adev->vm_manager.num_level > 1;<br>
                 break;<br>
         case IP_VERSION(9, 4, 1):<br>
@@ -2140,7 +2144,7 @@ static int gmc_v9_0_sw_init(void *handle)<br>
         if (r)<br>
                 return r;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {<br>
                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,<br>
                                         &adev->gmc.vm_fault);<br>
                 if (r)<br>
@@ -2169,7 +2173,10 @@ static int gmc_v9_0_sw_init(void *handle)<br>
          */<br>
         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */<br>
 <br>
-       dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;<br>
+       dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=<br>
+                                       IP_VERSION(9, 4, 2) ?<br>
+                               48 :<br>
+                               44;<br>
         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));<br>
         if (r) {<br>
                 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");<br>
@@ -2183,7 +2190,7 @@ static int gmc_v9_0_sw_init(void *handle)<br>
 <br>
         amdgpu_gmc_get_vbios_allocations(adev);<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {<br>
                 r = gmc_v9_0_init_mem_ranges(adev);<br>
                 if (r)<br>
                         return r;<br>
@@ -2209,9 +2216,11 @@ static int gmc_v9_0_sw_init(void *handle)<br>
          * for video processing.<br>
          */<br>
         adev->vm_manager.first_kfd_vmid =<br>
-               (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||<br>
-                adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||<br>
-                adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8;<br>
+               (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||<br>
+                amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||<br>
+                amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) ?<br>
+                       3 :<br>
+                       8;<br>
 <br>
         amdgpu_vm_manager_init(adev);<br>
 <br>
@@ -2221,7 +2230,7 @@ static int gmc_v9_0_sw_init(void *handle)<br>
         if (r)<br>
                 return r;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))<br>
                 amdgpu_gmc_sysfs_init(adev);<br>
 <br>
         return 0;<br>
@@ -2231,7 +2240,7 @@ static int gmc_v9_0_sw_fini(void *handle)<br>
 {<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))<br>
                 amdgpu_gmc_sysfs_fini(adev);<br>
         adev->gmc.num_mem_partitions = 0;<br>
         kfree(adev->gmc.mem_partitions);<br>
@@ -2253,8 +2262,7 @@ static int gmc_v9_0_sw_fini(void *handle)<br>
 <br>
 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)<br>
 {<br>
-<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
                 if (amdgpu_sriov_vf(adev))<br>
                         break;<br>
@@ -2288,8 +2296,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)<br>
  */<br>
 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)<br>
 {<br>
-       if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||<br>
-           (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {<br>
+       if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||<br>
+           (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {<br>
                 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);<br>
                 WARN_ON(adev->gmc.sdpif_register !=<br>
                         RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c<br>
index 71d1a2e3bac9..3f3a6445c006 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c<br>
@@ -49,8 +49,8 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,<br>
 static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,<br>
                                     struct amdgpu_ring *ring)<br>
 {<br>
-       if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) ||<br>
-           adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2))<br>
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0) ||<br>
+           amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2))<br>
                 return;<br>
 <br>
         if (!ring || !ring->funcs->emit_wreg)<br>
@@ -80,7 +80,7 @@ static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)<br>
         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))<br>
                 return;<br>
 <br>
-       if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0))<br>
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) >= IP_VERSION(4, 4, 0))<br>
                 WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0);<br>
         else<br>
                 /*read back hdp ras counter to reset it to 0 */<br>
@@ -92,10 +92,10 @@ static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,<br>
 {<br>
         uint32_t def, data;<br>
 <br>
-       if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) ||<br>
-           adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) ||<br>
-           adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) ||<br>
-           adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) {<br>
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 0, 0) ||<br>
+           amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 0, 1) ||<br>
+           amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 1, 1) ||<br>
+           amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 1, 0)) {<br>
                 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));<br>
 <br>
                 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))<br>
@@ -137,7 +137,7 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,<br>
 <br>
 static void hdp_v4_0_init_registers(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[HDP_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {<br>
         case IP_VERSION(4, 2, 1):<br>
                 WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);<br>
                 break;<br>
@@ -147,7 +147,7 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev)<br>
 <br>
         WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);<br>
 <br>
-       if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))<br>
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0))<br>
                 WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, READ_BUFFER_WATERMARK, 2);<br>
 <br>
         WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c<br>
index 6f20f9889a78..ab06c2b4b20b 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c<br>
@@ -51,7 +51,7 @@ static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,<br>
                                 AMD_CG_SUPPORT_HDP_SD)))<br>
                 return;<br>
 <br>
-       if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(6, 1, 0))<br>
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0))<br>
                 hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1);<br>
         else<br>
                 hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);<br>
@@ -61,7 +61,7 @@ static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,<br>
          * forced on IPH & RC clock */<br>
         hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,<br>
                                      RC_MEM_CLK_SOFT_OVERRIDE, 1);<br>
-       if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(6, 1, 0))<br>
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0))<br>
                 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1, hdp_clk_cntl);<br>
         else<br>
                 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);<br>
@@ -126,7 +126,7 @@ static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,<br>
         /* disable IPH & RC clock override after clock/power mode changing */<br>
         hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,<br>
                                      RC_MEM_CLK_SOFT_OVERRIDE, 0);<br>
-       if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(6, 1, 0))<br>
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0))<br>
                 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1, hdp_clk_cntl);<br>
         else<br>
                 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c<br>
index 0a8bc6c94fa9..875fb5ac70b5 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c<br>
@@ -353,7 +353,7 @@ static void imu_v11_0_program_rlc_ram(struct amdgpu_device *adev)<br>
 <br>
         WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);<br>
 <br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
                 program_imu_rlc_ram(adev, imu_rlc_ram_golden_11,<br>
                                 (const u32)ARRAY_SIZE(imu_rlc_ram_golden_11));<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c<br>
index aadb74de52bc..e67a337457ed 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c<br>
@@ -128,7 +128,7 @@ static int jpeg_v2_5_sw_init(void *handle)<br>
 <br>
                 ring = adev->jpeg.inst[i].ring_dec;<br>
                 ring->use_doorbell = true;<br>
-               if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))<br>
+               if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))<br>
                         ring->vm_hub = AMDGPU_MMHUB1(0);<br>
                 else<br>
                         ring->vm_hub = AMDGPU_MMHUB0(0);<br>
@@ -822,7 +822,7 @@ static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {<br>
 <br>
 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[JPEG_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) {<br>
         case IP_VERSION(2, 6, 0):<br>
                 adev->jpeg.ras = &jpeg_v2_6_ras;<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c<br>
index df4440c21bbf..a92481da60cd 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c<br>
@@ -52,7 +52,7 @@ static int jpeg_v3_0_early_init(void *handle)<br>
 <br>
         u32 harvest;<br>
 <br>
-       switch (adev->ip_versions[UVD_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {<br>
         case IP_VERSION(3, 1, 1):<br>
         case IP_VERSION(3, 1, 2):<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c<br>
index 3eb3dcd56b57..98ed49b16e62 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c<br>
@@ -831,7 +831,7 @@ static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {<br>
 <br>
 static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[JPEG_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
                 adev->jpeg.ras = &jpeg_v4_0_ras;<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c<br>
index eb06d749876f..1e5ad1e08d2a 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c<br>
@@ -558,7 +558,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,<br>
         WREG32_SOC15(GC, 0, mmCP_MES_MDBOUND_LO, 0x3FFFF);<br>
 <br>
         /* invalidate ICACHE */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
                 data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid);<br>
                 break;<br>
@@ -568,7 +568,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,<br>
         }<br>
         data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);<br>
         data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
                 WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data);<br>
                 break;<br>
@@ -578,7 +578,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,<br>
         }<br>
 <br>
         /* prime the ICACHE. */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
                 data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid);<br>
                 break;<br>
@@ -587,7 +587,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,<br>
                 break;<br>
         }<br>
         data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
                 WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data);<br>
                 break;<br>
@@ -995,7 +995,7 @@ static void mes_v10_1_kiq_setting(struct amdgpu_ring *ring)<br>
         struct amdgpu_device *adev = ring->adev;<br>
 <br>
         /* tell RLC which is KIQ queue */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 3, 0):<br>
         case IP_VERSION(10, 3, 2):<br>
         case IP_VERSION(10, 3, 1):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c<br>
index 3fa5bc3ddf92..4a3020b5b30f 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c<br>
@@ -1316,7 +1316,7 @@ static int mes_v11_0_late_init(void *handle)<br>
 <br>
         /* it's only intended for use in mes_self_test case, not for s0ix and reset */<br>
         if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&<br>
-           (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))<br>
+           (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))<br>
                 amdgpu_mes_self_test(adev);<br>
 <br>
         return 0;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c<br>
index 8f76c6ecf50a..37458f906980 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c<br>
@@ -151,7 +151,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,<br>
         dev_err(adev->dev,<br>
                 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",<br>
                 status);<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 0, 0):<br>
         case IP_VERSION(2, 0, 2):<br>
                 mmhub_cid = mmhub_client_ids_navi1x[cid][rw];<br>
@@ -568,7 +568,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad<br>
         if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))<br>
                 return;<br>
 <br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
         case IP_VERSION(2, 1, 1):<br>
         case IP_VERSION(2, 1, 2):<br>
@@ -601,7 +601,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad<br>
                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);<br>
         }<br>
 <br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
         case IP_VERSION(2, 1, 1):<br>
         case IP_VERSION(2, 1, 2):<br>
@@ -625,7 +625,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade<br>
         if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS))<br>
                 return;<br>
 <br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
         case IP_VERSION(2, 1, 1):<br>
         case IP_VERSION(2, 1, 2):<br>
@@ -651,7 +651,7 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 0, 0):<br>
         case IP_VERSION(2, 0, 2):<br>
         case IP_VERSION(2, 1, 0):<br>
@@ -676,7 +676,7 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)<br>
         if (amdgpu_sriov_vf(adev))<br>
                 *flags = 0;<br>
 <br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
         case IP_VERSION(2, 1, 1):<br>
         case IP_VERSION(2, 1, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c<br>
index 1dce053a4c4d..4ddd9448e2bc 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c<br>
@@ -90,7 +90,7 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,<br>
         dev_err(adev->dev,<br>
                 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",<br>
                 status);<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(2, 3, 0):<br>
         case IP_VERSION(2, 4, 0):<br>
         case IP_VERSION(2, 4, 1):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c<br>
index 7c9ab5491067..9627df8b194b 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c<br>
@@ -107,7 +107,7 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev,<br>
         dev_err(adev->dev,<br>
                 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",<br>
                 status);<br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 0):<br>
         case IP_VERSION(3, 0, 1):<br>
                 mmhub_cid = mmhub_client_ids_v3_0_0[cid][rw];<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c<br>
index db79e6f92441..77bff803b452 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c<br>
@@ -108,7 +108,7 @@ mmhub_v3_0_1_print_l2_protection_fault_status(struct amdgpu_device *adev,<br>
                 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",<br>
                 status);<br>
 <br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 1):<br>
                 mmhub_cid = mmhub_client_ids_v3_0_1[cid][rw];<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c<br>
index 8194ee2b96c4..3d80a184ce6b 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c<br>
@@ -96,7 +96,7 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,<br>
                 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",<br>
                 status);<br>
 <br>
-       switch (adev->ip_versions[MMHUB_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {<br>
         case IP_VERSION(3, 3, 0):<br>
                 mmhub_cid = mmhub_client_ids_v3_3[cid][rw];<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c<br>
index b6a8478dabf4..bb1873363d75 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c<br>
@@ -107,7 +107,7 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,<br>
 {<br>
         u32 ih_cntl, ih_rb_cntl;<br>
 <br>
-       if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3))<br>
+       if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) < IP_VERSION(5, 0, 3))<br>
                 return;<br>
 <br>
         ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);<br>
@@ -330,7 +330,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)<br>
 <br>
         if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {<br>
                 if (ih[0]->use_bus_addr) {<br>
-                       switch (adev->ip_versions[OSSSYS_HWIP][0]) {<br>
+                       switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {<br>
                         case IP_VERSION(5, 0, 3):<br>
                         case IP_VERSION(5, 2, 0):<br>
                         case IP_VERSION(5, 2, 1):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c<br>
index 4038455d7998..e523627cfe25 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c<br>
@@ -536,7 +536,7 @@ static void nbio_v2_3_clear_doorbell_interrupt(struct amdgpu_device *adev)<br>
 {<br>
         uint32_t reg, reg_data;<br>
 <br>
-       if (adev->ip_versions[NBIO_HWIP][0] != IP_VERSION(3, 3, 0))<br>
+       if (amdgpu_ip_version(adev, NBIO_HWIP, 0) != IP_VERSION(3, 3, 0))<br>
                 return;<br>
 <br>
         reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c<br>
index e5b5b0f4940f..a3622897e3fe 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c<br>
@@ -338,7 +338,7 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {<br>
 <br>
 static void nbio_v4_3_init_registers(struct amdgpu_device *adev)<br>
 {<br>
-       if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {<br>
+       if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(4, 3, 0)) {<br>
                 uint32_t data;<br>
 <br>
                 data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);<br>
@@ -392,8 +392,8 @@ static void nbio_v4_3_program_aspm(struct amdgpu_device *adev)<br>
 #ifdef CONFIG_PCIEASPM<br>
         uint32_t def, data;<br>
 <br>
-       if (!(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 4, 0)) &&<br>
-             !(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 6, 0)))<br>
+       if (!(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 4, 0)) &&<br>
+           !(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 6, 0)))<br>
                 return;<br>
 <br>
         def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c<br>
index 4ef1fa4603c8..e962821ae6a1 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c<br>
@@ -59,7 +59,7 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)<br>
 {<br>
         u32 tmp;<br>
 <br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(7, 2, 1):<br>
         case IP_VERSION(7, 3, 0):<br>
         case IP_VERSION(7, 5, 0):<br>
@@ -78,7 +78,7 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)<br>
 <br>
 static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)<br>
 {<br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(7, 2, 1):<br>
         case IP_VERSION(7, 3, 0):<br>
         case IP_VERSION(7, 5, 0):<br>
@@ -262,7 +262,7 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev<br>
 {<br>
         uint32_t def, data;<br>
 <br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(7, 2, 1):<br>
         case IP_VERSION(7, 3, 0):<br>
         case IP_VERSION(7, 5, 0):<br>
@@ -369,7 +369,7 @@ const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = {<br>
 static void nbio_v7_2_init_registers(struct amdgpu_device *adev)<br>
 {<br>
         uint32_t def, data;<br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(7, 2, 1):<br>
         case IP_VERSION(7, 3, 0):<br>
         case IP_VERSION(7, 5, 0):<br>
@@ -394,7 +394,7 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)<br>
                 break;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(7, 3, 0):<br>
         case IP_VERSION(7, 5, 1):<br>
                 data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c<br>
index 685abf57ffdd..7d6d7734dbec 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c<br>
@@ -347,7 +347,7 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)<br>
                 adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,<br>
                         mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;<br>
 <br>
-       if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4) &&<br>
+       if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4) &&<br>
             !amdgpu_sriov_vf(adev)) {<br>
                 baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL);<br>
                 if (baco_cntl &<br>
@@ -702,7 +702,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)<br>
 #ifdef CONFIG_PCIEASPM<br>
         uint32_t def, data;<br>
 <br>
-       if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))<br>
+       if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4))<br>
                 return;<br>
 <br>
         def = data = RREG32_PCIE(smnPCIE_LC_CNTL);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c<br>
index 13aca808ecab..0535cabe3b16 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/nv.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c<br>
@@ -214,7 +214,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,<br>
         if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))<br>
                 return -EINVAL;<br>
 <br>
-       switch (adev->ip_versions[UVD_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 0):<br>
         case IP_VERSION(3, 0, 64):<br>
         case IP_VERSION(3, 0, 192):<br>
@@ -453,7 +453,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)<br>
                 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",<br>
                                   amdgpu_reset_method);<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 5, 0):<br>
         case IP_VERSION(13, 0, 1):<br>
         case IP_VERSION(13, 0, 3):<br>
@@ -669,7 +669,7 @@ static int nv_common_early_init(void *handle)<br>
         /* TODO: split the GC and PG flags based on the IP version for which<br>
          * they are relevant.<br>
          */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(10, 1, 10):<br>
                 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |<br>
                         AMD_CG_SUPPORT_GFX_CGCG |<br>
@@ -1073,7 +1073,7 @@ static int nv_common_set_clockgating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(2, 3, 0):<br>
         case IP_VERSION(2, 3, 1):<br>
         case IP_VERSION(2, 3, 2):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c<br>
index 5f10883da6a2..145186a1e48f 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c<br>
@@ -58,9 +58,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)<br>
                 return err;<br>
 <br>
         err = psp_init_ta_microcode(psp, ucode_prefix);<br>
-       if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) &&<br>
-               (adev->pdev->revision == 0xa1) &&<br>
-               (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) {<br>
+       if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0)) &&<br>
+           (adev->pdev->revision == 0xa1) &&<br>
+           (psp->securedisplay_context.context.bin_desc.fw_version >=<br>
+            0x27000008)) {<br>
                 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;<br>
         }<br>
         return err;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c<br>
index 8f84fe40abbb..efa37e3b7931 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c<br>
@@ -95,7 +95,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)<br>
 <br>
         amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));<br>
 <br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 2):<br>
         case IP_VERSION(11, 0, 4):<br>
                 err = psp_init_sos_microcode(psp, ucode_prefix);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c<br>
index 469eed084976..54008a8991fc 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c<br>
@@ -79,7 +79,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)<br>
 <br>
         amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));<br>
 <br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 2):<br>
                 err = psp_init_sos_microcode(psp, ucode_prefix);<br>
                 if (err)<br>
@@ -181,7 +181,7 @@ static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)<br>
 {<br>
         struct amdgpu_device *adev = psp->adev;<br>
 <br>
-       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) {<br>
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {<br>
                 psp_v13_0_wait_for_vmbx_ready(psp);<br>
 <br>
                 return psp_v13_0_wait_for_bootloader(psp);<br>
@@ -728,7 +728,7 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)<br>
 {<br>
         struct amdgpu_device *adev = psp->adev;<br>
 <br>
-       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 10)) {<br>
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 10)) {<br>
                 uint32_t  reg_data;<br>
                 /* MP1 fatal error: trigger PSP dram read to unhalt PSP<br>
                  * during MP1 triggered sync flood.<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c<br>
index d5ba58eba3e2..eaa5512a21da 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c<br>
@@ -40,7 +40,7 @@ static int psp_v13_0_4_init_microcode(struct psp_context *psp)<br>
 <br>
         amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));<br>
 <br>
-       switch (adev->ip_versions[MP0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 4):<br>
                 err = psp_init_toc_microcode(psp, ucode_prefix);<br>
                 if (err)<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c<br>
index cd37f45e01a1..8562ac7f7ff0 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c<br>
@@ -469,7 +469,7 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id)<br>
 <br>
 static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
                 soc15_program_register_sequence(adev,<br>
                                                 golden_settings_sdma_4,<br>
@@ -539,7 +539,7 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)<br>
          * The only chips with SDMAv4 and ULV are VG10 and VG20.<br>
          * Server SKUs take a different hysteresis setting from other SKUs.<br>
          */<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
                 if (adev->pdev->device == 0x6860)<br>
                         break;<br>
@@ -578,8 +578,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)<br>
         int ret, i;<br>
 <br>
         for (i = 0; i < adev->sdma.num_instances; i++) {<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||<br>
-                    adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                           IP_VERSION(4, 2, 2) ||<br>
+                   amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                           IP_VERSION(4, 4, 0)) {<br>
                        /* Arcturus & Aldebaran will leverage the same FW memory<br>
                           for every SDMA instance */<br>
                         ret = amdgpu_sdma_init_microcode(adev, 0, true);<br>
@@ -978,7 +980,8 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)<br>
                  * Arcturus for the moment and firmware version 14<br>
                  * and above.<br>
                  */<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                           IP_VERSION(4, 2, 2) &&<br>
                     adev->sdma.instance[i].fw_version >= 14)<br>
                         WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);<br>
                 /* Extend page fault timeout to avoid interrupt storm */<br>
@@ -1255,7 +1258,7 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)<br>
         if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))<br>
                 return;<br>
 <br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 1, 0):<br>
         case IP_VERSION(4, 1, 1):<br>
         case IP_VERSION(4, 1, 2):<br>
@@ -1698,7 +1701,7 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)<br>
 {<br>
         uint fw_version = adev->sdma.instance[0].fw_version;<br>
 <br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
                 return fw_version >= 430;<br>
         case IP_VERSION(4, 0, 1):<br>
@@ -1723,7 +1726,7 @@ static int sdma_v4_0_early_init(void *handle)<br>
         }<br>
 <br>
         /* TODO: Page queue breaks driver reload under SRIOV */<br>
-       if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&<br>
+       if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 0, 0)) &&<br>
             amdgpu_sriov_vf((adev)))<br>
                 adev->sdma.has_page_queue = false;<br>
         else if (sdma_v4_0_fw_support_paging_queue(adev))<br>
@@ -1823,7 +1826,9 @@ static int sdma_v4_0_sw_init(void *handle)<br>
                  * On Arcturus, SDMA instances 5~7 have a different vmhub<br>
                  * type (AMDGPU_MMHUB1).<br>
                  */<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                           IP_VERSION(4, 2, 2) &&<br>
+                   i >= 5)<br>
                         ring->vm_hub = AMDGPU_MMHUB1(0);<br>
                 else<br>
                         ring->vm_hub = AMDGPU_MMHUB0(0);<br>
@@ -1843,8 +1848,10 @@ static int sdma_v4_0_sw_init(void *handle)<br>
                        /* paging queue uses the same doorbell index/routing as gfx queue<br>
                          * with 0x400 (4096 dwords) offset on second doorbell page<br>
                          */<br>
-                       if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&<br>
-                           adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) {<br>
+                       if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=<br>
+                                   IP_VERSION(4, 0, 0) &&<br>
+                           amdgpu_ip_version(adev, SDMA0_HWIP, 0) <<br>
+                                   IP_VERSION(4, 2, 0)) {<br>
                                 ring->doorbell_index =<br>
                                         adev->doorbell_index.sdma_engine[i] << 1;<br>
                                 ring->doorbell_index += 0x400;<br>
@@ -1856,7 +1863,9 @@ static int sdma_v4_0_sw_init(void *handle)<br>
                                         (adev->doorbell_index.sdma_engine[i] + 1) << 1;<br>
                         }<br>
 <br>
-                       if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)<br>
+                       if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                                   IP_VERSION(4, 2, 2) &&<br>
+                           i >= 5)<br>
                                 ring->vm_hub = AMDGPU_MMHUB1(0);<br>
                         else<br>
                                 ring->vm_hub = AMDGPU_MMHUB0(0);<br>
@@ -1890,8 +1899,8 @@ static int sdma_v4_0_sw_fini(void *handle)<br>
                         amdgpu_ring_fini(&adev->sdma.instance[i].page);<br>
         }<br>
 <br>
-       if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||<br>
-            adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))<br>
+       if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 2, 2) ||<br>
+           amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 0))<br>
                 amdgpu_sdma_destroy_inst_ctx(adev, true);<br>
         else<br>
                 amdgpu_sdma_destroy_inst_ctx(adev, false);<br>
@@ -2036,14 +2045,16 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,<br>
                 amdgpu_fence_process(&adev->sdma.instance[instance].ring);<br>
                 break;<br>
         case 1:<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                   IP_VERSION(4, 2, 0))<br>
                         amdgpu_fence_process(&adev->sdma.instance[instance].page);<br>
                 break;<br>
         case 2:<br>
                 /* XXX compute */<br>
                 break;<br>
         case 3:<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) !=<br>
+                   IP_VERSION(4, 2, 0))<br>
                         amdgpu_fence_process(&adev->sdma.instance[instance].page);<br>
                 break;<br>
         }<br>
@@ -2259,7 +2270,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,<br>
 {<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
 <br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 1, 0):<br>
         case IP_VERSION(4, 1, 1):<br>
         case IP_VERSION(4, 1, 2):<br>
@@ -2622,7 +2633,7 @@ static struct amdgpu_sdma_ras sdma_v4_0_ras = {<br>
 <br>
 static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 2, 0):<br>
         case IP_VERSION(4, 2, 2):<br>
                 adev->sdma.ras = &sdma_v4_0_ras;<br>
@@ -2633,7 +2644,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)<br>
         default:<br>
                 break;<br>
         }<br>
-<br>
 }<br>
 <br>
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c<br>
index 267c1b7b8dcd..1cadd3cb26a9 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c<br>
@@ -132,7 +132,8 @@ static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)<br>
         int ret, i;<br>
 <br>
         for (i = 0; i < adev->sdma.num_instances; i++) {<br>
-               if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) {<br>
+               if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                   IP_VERSION(4, 4, 2)) {<br>
                         ret = amdgpu_sdma_init_microcode(adev, 0, true);<br>
                         break;<br>
                 } else {<br>
@@ -1231,7 +1232,7 @@ static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t re<br>
 <br>
 static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(4, 4, 2):<br>
                 return false;<br>
         default:<br>
@@ -1401,7 +1402,7 @@ static int sdma_v4_4_2_sw_fini(void *handle)<br>
                         amdgpu_ring_fini(&adev->sdma.instance[i].page);<br>
         }<br>
 <br>
-       if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2))<br>
+       if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2))<br>
                 amdgpu_sdma_destroy_inst_ctx(adev, true);<br>
         else<br>
                 amdgpu_sdma_destroy_inst_ctx(adev, false);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c<br>
index 1cc34efb455b..e0527e5ed7d1 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c<br>
@@ -184,7 +184,7 @@ static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3<br>
 <br>
 static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(5, 0, 0):<br>
                 soc15_program_register_sequence(adev,<br>
                                                 golden_settings_sdma_5,<br>
@@ -1697,7 +1697,7 @@ static int sdma_v5_0_set_clockgating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(5, 0, 0):<br>
         case IP_VERSION(5, 0, 2):<br>
         case IP_VERSION(5, 0, 5):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c<br>
index 2b3ebebc4299..0ccb7523bc55 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c<br>
@@ -1510,7 +1510,7 @@ static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,<br>
 static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev,<br>
                                                      int i)<br>
 {<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(5, 2, 1):<br>
                 if (adev->sdma.instance[i].fw_version < 70)<br>
                         return false;<br>
@@ -1575,8 +1575,9 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev<br>
         int i;<br>
 <br>
         for (i = 0; i < adev->sdma.num_instances; i++) {<br>
-<br>
-               if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1))<br>
+               if (adev->sdma.instance[i].fw_version < 70 &&<br>
+                   amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==<br>
+                           IP_VERSION(5, 2, 1))<br>
                         adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;<br>
 <br>
                 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {<br>
@@ -1605,7 +1606,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(5, 2, 0):<br>
         case IP_VERSION(5, 2, 2):<br>
         case IP_VERSION(5, 2, 1):<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c<br>
index 0e25b6fb1340..4d6de77d289e 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c<br>
@@ -1246,14 +1246,13 @@ static struct amdgpu_sdma_ras sdma_v6_0_3_ras = {<br>
 <br>
 static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[SDMA0_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {<br>
         case IP_VERSION(6, 0, 3):<br>
                 adev->sdma.ras = &sdma_v6_0_3_ras;<br>
                 break;<br>
         default:<br>
                 break;<br>
         }<br>
-<br>
 }<br>
 <br>
 static int sdma_v6_0_early_init(void *handle)<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c<br>
index 07ded70f4df9..93f6772d1b24 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c<br>
@@ -36,7 +36,7 @@ static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_c<br>
 #if 0<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) &&<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7) &&<br>
             adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))<br>
                 return true;<br>
 #endif<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c<br>
index 9c72add6f93d..66ed28136bc8 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c<br>
@@ -174,8 +174,8 @@ static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {<br>
 static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,<br>
                                     const struct amdgpu_video_codecs **codecs)<br>
 {<br>
-       if (adev->ip_versions[VCE_HWIP][0]) {<br>
-               switch (adev->ip_versions[VCE_HWIP][0]) {<br>
+       if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {<br>
+               switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {<br>
                 case IP_VERSION(4, 0, 0):<br>
                 case IP_VERSION(4, 1, 0):<br>
                         if (encode)<br>
@@ -187,7 +187,7 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,<br>
                         return -EINVAL;<br>
                 }<br>
         } else {<br>
-               switch (adev->ip_versions[UVD_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {<br>
                 case IP_VERSION(1, 0, 0):<br>
                 case IP_VERSION(1, 0, 1):<br>
                         if (encode)<br>
@@ -324,12 +324,12 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)<br>
 {<br>
         u32 reference_clock = adev->clock.spll.reference_freq;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) ||<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) ||<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))<br>
                 return 10000;<br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) ||<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1))<br>
                 return reference_clock / 4;<br>
 <br>
         return reference_clock;<br>
@@ -523,7 +523,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)<br>
                 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",<br>
                                   amdgpu_reset_method);<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(10, 0, 0):<br>
         case IP_VERSION(10, 0, 1):<br>
         case IP_VERSION(12, 0, 0):<br>
@@ -599,7 +599,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)<br>
 <br>
 static bool soc15_supports_baco(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 0):<br>
         case IP_VERSION(11, 0, 2):<br>
                 if (adev->asic_type == CHIP_VEGA20) {<br>
@@ -938,7 +938,7 @@ static int soc15_common_early_init(void *handle)<br>
         /* TODO: split the GC and PG flags based on the IP version for which<br>
          * they are relevant.<br>
          */<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 0, 1):<br>
                 adev->asic_funcs = &soc15_asic_funcs;<br>
                 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |<br>
@@ -1367,7 +1367,7 @@ static int soc15_common_set_clockgating_state(void *handle,<br>
         if (amdgpu_sriov_vf(adev))<br>
                 return 0;<br>
 <br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(6, 1, 0):<br>
         case IP_VERSION(6, 2, 0):<br>
         case IP_VERSION(7, 4, 0):<br>
@@ -1423,8 +1423,7 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)<br>
 <br>
         adev->hdp.funcs->get_clock_gating_state(adev, flags);<br>
 <br>
-       if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {<br>
-<br>
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) {<br>
                 /* AMD_CG_SUPPORT_DRM_MGCG */<br>
                 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));<br>
                 if (!(data & 0x01000000))<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c<br>
index 2ecc8c9a078b..92a80780ab72 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c<br>
@@ -153,7 +153,7 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,<br>
         if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))<br>
                 return -EINVAL;<br>
 <br>
-       switch (adev->ip_versions[UVD_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
         case IP_VERSION(4, 0, 2):<br>
         case IP_VERSION(4, 0, 4):<br>
@@ -374,7 +374,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)<br>
                 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",<br>
                                   amdgpu_reset_method);<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 0):<br>
         case IP_VERSION(13, 0, 7):<br>
         case IP_VERSION(13, 0, 10):<br>
@@ -448,7 +448,7 @@ const struct amdgpu_ip_block_version soc21_common_ip_block = {<br>
 <br>
 static bool soc21_need_full_reset(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
                 return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);<br>
         case IP_VERSION(11, 0, 2):<br>
@@ -577,7 +577,7 @@ static int soc21_common_early_init(void *handle)<br>
 <br>
         adev->rev_id = amdgpu_device_get_rev_id(adev);<br>
         adev->external_rev_id = 0xff;<br>
-       switch (adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
                 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |<br>
                         AMD_CG_SUPPORT_GFX_CGLS |<br>
@@ -843,7 +843,7 @@ static int soc21_common_set_clockgating_state(void *handle,<br>
 {<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
 <br>
-       switch (adev->ip_versions[NBIO_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {<br>
         case IP_VERSION(4, 3, 0):<br>
         case IP_VERSION(4, 3, 1):<br>
         case IP_VERSION(7, 7, 0):<br>
@@ -865,7 +865,7 @@ static int soc21_common_set_powergating_state(void *handle,<br>
 {<br>
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
 <br>
-       switch (adev->ip_versions[LSDMA_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {<br>
         case IP_VERSION(6, 0, 0):<br>
         case IP_VERSION(6, 0, 2):<br>
                 adev->lsdma.funcs->update_memory_power_gating(adev,<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c<br>
index 67164991f541..99713949b61f 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c<br>
@@ -273,14 +273,15 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)<br>
 <br>
         memcpy(set_hw_resources.mmhub_base, adev->reg_offset[MMHUB_HWIP][0],<br>
                sizeof(uint32_t) * 5);<br>
-       set_hw_resources.mmhub_version = adev->ip_versions[MMHUB_HWIP][0];<br>
+       set_hw_resources.mmhub_version = amdgpu_ip_version(adev, MMHUB_HWIP, 0);<br>
 <br>
         memcpy(set_hw_resources.osssys_base, adev->reg_offset[OSSSYS_HWIP][0],<br>
                sizeof(uint32_t) * 5);<br>
-       set_hw_resources.osssys_version = adev->ip_versions[OSSSYS_HWIP][0];<br>
+       set_hw_resources.osssys_version =<br>
+               amdgpu_ip_version(adev, OSSSYS_HWIP, 0);<br>
 <br>
-       set_hw_resources.vcn_version = adev->ip_versions[VCN_HWIP][0];<br>
-       set_hw_resources.vpe_version = adev->ip_versions[VPE_HWIP][0];<br>
+       set_hw_resources.vcn_version = amdgpu_ip_version(adev, VCN_HWIP, 0);<br>
+       set_hw_resources.vpe_version = amdgpu_ip_version(adev, VPE_HWIP, 0);<br>
 <br>
         set_hw_resources.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;<br>
         set_hw_resources.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c<br>
index 6fbea38f4d3e..aba403d71806 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c<br>
@@ -187,7 +187,7 @@ static int vcn_v2_5_sw_init(void *handle)<br>
                 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +<br>
                                 (amdgpu_sriov_vf(adev) ? 2*j : 8*j);<br>
 <br>
-               if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))<br>
+               if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))<br>
                         ring->vm_hub = AMDGPU_MMHUB1(0);<br>
                 else<br>
                         ring->vm_hub = AMDGPU_MMHUB0(0);<br>
@@ -207,7 +207,8 @@ static int vcn_v2_5_sw_init(void *handle)<br>
                         ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +<br>
                                         (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));<br>
 <br>
-                       if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))<br>
+                       if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==<br>
+                           IP_VERSION(2, 5, 0))<br>
                                 ring->vm_hub = AMDGPU_MMHUB1(0);<br>
                         else<br>
                                 ring->vm_hub = AMDGPU_MMHUB0(0);<br>
@@ -794,7 +795,7 @@ static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,<br>
 {<br>
         uint32_t tmp;<br>
 <br>
-       if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(2, 6, 0))<br>
+       if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))<br>
                 return;<br>
 <br>
         tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |<br>
@@ -1985,7 +1986,7 @@ static struct amdgpu_vcn_ras vcn_v2_6_ras = {<br>
 <br>
 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[VCN_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {<br>
         case IP_VERSION(2, 6, 0):<br>
                 adev->vcn.ras = &vcn_v2_6_ras;<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c<br>
index a61ecefdafc5..e02af4de521c 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c<br>
@@ -100,7 +100,8 @@ static int vcn_v3_0_early_init(void *handle)<br>
                         /* both instances are harvested, disable the block */<br>
                         return -ENOENT;<br>
 <br>
-               if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33))<br>
+               if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==<br>
+                   IP_VERSION(3, 0, 33))<br>
                         adev->vcn.num_enc_rings = 0;<br>
                 else<br>
                         adev->vcn.num_enc_rings = 2;<br>
@@ -227,9 +228,10 @@ static int vcn_v3_0_sw_init(void *handle)<br>
                                              cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);<br>
                 fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);<br>
                 fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;<br>
-               if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))<br>
+               if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 1, 2))<br>
                         fw_shared->smu_interface_info.smu_interface_type = 2;<br>
-               else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))<br>
+               else if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==<br>
+                        IP_VERSION(3, 1, 1))<br>
                         fw_shared->smu_interface_info.smu_interface_type = 1;<br>
 <br>
                 if (amdgpu_vcnfw_log)<br>
@@ -1255,7 +1257,8 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)<br>
                 fw_shared->rb.wptr = lower_32_bits(ring->wptr);<br>
                 fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);<br>
 <br>
-               if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {<br>
+               if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=<br>
+                   IP_VERSION(3, 0, 33)) {<br>
                         fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);<br>
                         ring = &adev->vcn.inst[i].ring_enc[0];<br>
                         WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));<br>
@@ -1628,7 +1631,8 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,<br>
                                         UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,<br>
                                         ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);<br>
 <br>
-                               if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {<br>
+                               if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=<br>
+                                   IP_VERSION(3, 0, 33)) {<br>
                                         /* Restore */<br>
                                         fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;<br>
                                         fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c<br>
index ae8db12d8832..96831f931423 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c<br>
@@ -169,7 +169,8 @@ static int vcn_v4_0_sw_init(void *handle)<br>
                 fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?<br>
                         AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;<br>
 <br>
-               if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) {<br>
+               if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==<br>
+                   IP_VERSION(4, 0, 2)) {<br>
                         fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;<br>
                         fw_shared->drm_key_wa.method =<br>
                                 AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;<br>
@@ -1852,7 +1853,7 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)<br>
                 if (adev->vcn.harvest_config & (1 << i))<br>
                         continue;<br>
 <br>
-               if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2))<br>
+               if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 2))<br>
                         vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true;<br>
 <br>
                 adev->vcn.inst[i].ring_enc[0].funcs =<br>
@@ -2159,7 +2160,7 @@ static struct amdgpu_vcn_ras vcn_v4_0_ras = {<br>
 <br>
 static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[VCN_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {<br>
         case IP_VERSION(4, 0, 0):<br>
                 adev->vcn.ras = &vcn_v4_0_ras;<br>
                 break;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c<br>
index dbc99536440f..ddfc6941f9d5 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c<br>
@@ -291,7 +291,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)<br>
 <br>
         adev->nbio.funcs->ih_control(adev);<br>
 <br>
-       if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 2, 1)) &&<br>
+       if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) &&<br>
             adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {<br>
                 ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);<br>
                 if (adev->irq.ih.use_bus_addr) {<br>
@@ -304,8 +304,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)<br>
         /* psp firmware won't program IH_CHICKEN for aldebaran;<br>
          * the driver needs to program it properly according to<br>
          * MC_SPACE type in IH_RB_CNTL */<br>
-       if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) ||<br>
-           (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) {<br>
+       if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) ||<br>
+           (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) {<br>
                 ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);<br>
                 if (adev->irq.ih.use_bus_addr) {<br>
                         ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,<br>
@@ -334,8 +334,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)<br>
                 vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index));<br>
 <br>
         /* Enable IH Retry CAM */<br>
-       if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0) ||<br>
-           adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))<br>
+       if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0) ||<br>
+           amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))<br>
                 WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,<br>
                                ENABLE, 1);<br>
         else<br>
@@ -537,7 +537,7 @@ static int vega20_ih_sw_init(void *handle)<br>
                 return r;<br>
 <br>
         if ((adev->flags & AMD_IS_APU) &&<br>
-           (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)))<br>
+           (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2)))<br>
                 use_bus_addr = false;<br>
 <br>
         r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);<br>
@@ -554,7 +554,7 @@ static int vega20_ih_sw_init(void *handle)<br>
         adev->irq.ih1.use_doorbell = true;<br>
         adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;<br>
 <br>
-       if (adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) {<br>
+       if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) != IP_VERSION(4, 4, 2)) {<br>
                 r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);<br>
                 if (r)<br>
                         return r;<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c<br>
index ab4a63bb3e3e..0a9cf9dfc224 100644<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c<br>
@@ -65,7 +65,7 @@ static int kfd_resume(struct kfd_node *kfd);<br>
 <br>
 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)<br>
 {<br>
-       uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];<br>
+       uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);<br>
 <br>
         switch (sdma_version) {<br>
         case IP_VERSION(4, 0, 0):/* VEGA10 */<br>
@@ -282,7 +282,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)<br>
                         f2g = &gfx_v8_kfd2kgd;<br>
                 break;<br>
         default:<br>
-               switch (adev->ip_versions[GC_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {<br>
                 /* Vega 10 */<br>
                 case IP_VERSION(9, 0, 1):<br>
                         gfx_target_version = 90000;<br>
@@ -427,9 +427,11 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)<br>
         }<br>
 <br>
         if (!f2g) {<br>
-               if (adev->ip_versions[GC_HWIP][0])<br>
-                       dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",<br>
-                               adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0))<br>
+                       dev_err(kfd_device,<br>
+                               "GC IP %06x %s not supported in kfd\n",<br>
+                               amdgpu_ip_version(adev, GC_HWIP, 0),<br>
+                               vf ? "VF" : "");<br>
                 else<br>
                         dev_err(kfd_device, "%s %s not supported in kfd\n",<br>
                                 amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c<br>
index 7d82c7da223a..192b0d106413 100644<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c<br>
@@ -1001,7 +1001,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)<br>
         void *r;<br>
 <br>
         /* Page migration works on gfx9 or newer */<br>
-       if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))<br>
                 return -EINVAL;<br>
 <br>
         if (adev->gmc.is_app_apu)<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c<br>
index 1a03173e2313..8ee2bedd301a 100644<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c<br>
@@ -205,7 +205,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,<br>
 <br>
 static inline bool pm_use_ext_eng(struct kfd_dev *dev)<br>
 {<br>
-       return dev->adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 2, 0);<br>
+       return amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) >=<br>
+              IP_VERSION(5, 2, 0);<br>
 }<br>
 <br>
 static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h<br>
index b315311dfe2a..ae8e6ce9436d 100644<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h<br>
@@ -202,7 +202,7 @@ enum cache_policy {<br>
         cache_policy_noncoherent<br>
 };<br>
 <br>
-#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])<br>
+#define KFD_GC_VERSION(dev) (amdgpu_ip_version((dev)->adev, GC_HWIP, 0))<br>
 #define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))<br>
 #define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\<br>
         ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) ||        \<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c<br>
index 841ba6102bbb..c8abe7118907 100644<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c<br>
@@ -1194,7 +1194,7 @@ svm_range_get_pte_flags(struct kfd_node *node,<br>
         if (domain == SVM_RANGE_VRAM_DOMAIN)<br>
                 bo_node = prange->svm_bo->node;<br>
 <br>
-       switch (node->adev->ip_versions[GC_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {<br>
         case IP_VERSION(9, 4, 1):<br>
                 if (domain == SVM_RANGE_VRAM_DOMAIN) {<br>
                         if (bo_node == node) {<br>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c<br>
index 5efebc06296b..933c9b5d5252 100644<br>
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c<br>
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c<br>
@@ -1173,7 +1173,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)<br>
         for (i = 0; i < fb_info->num_fb; ++i)<br>
                 hw_params.fb[i] = &fb_info->fb[i];<br>
 <br>
-       switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
         case IP_VERSION(3, 1, 3):<br>
         case IP_VERSION(3, 1, 4):<br>
         case IP_VERSION(3, 5, 0):<br>
@@ -1606,7 +1606,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)<br>
 <br>
         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;<br>
 <br>
-       switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
                 switch (adev->dm.dmcub_fw_version) {<br>
                 case 0: /* development */<br>
@@ -1631,7 +1631,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)<br>
                 init_data.flags.gpu_vm_support = true;<br>
                 break;<br>
         default:<br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(1, 0, 0):<br>
                 case IP_VERSION(1, 0, 1):<br>
                         /* enable S/G on PCO and RV2 */<br>
@@ -2015,7 +2015,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)<br>
                         return 0;<br>
                 break;<br>
         default:<br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(2, 0, 2):<br>
                 case IP_VERSION(2, 0, 3):<br>
                 case IP_VERSION(2, 0, 0):<br>
@@ -2105,7 +2105,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)<br>
         enum dmub_status status;<br>
         int r;<br>
 <br>
-       switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
                 dmub_asic = DMUB_ASIC_DCN21;<br>
                 break;<br>
@@ -2477,7 +2477,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)<br>
          * therefore, this function applies to navi10/12/14 but not Renoir<br>
          * *<br>
          */<br>
-       switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
         case IP_VERSION(2, 0, 2):<br>
         case IP_VERSION(2, 0, 0):<br>
                 break;<br>
@@ -4429,7 +4429,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)<br>
                 }<br>
 <br>
         /* Use Outbox interrupt */<br>
-       switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 0):<br>
         case IP_VERSION(3, 1, 2):<br>
         case IP_VERSION(3, 1, 3):<br>
@@ -4447,12 +4447,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)<br>
                 break;<br>
         default:<br>
                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",<br>
-                             adev->ip_versions[DCE_HWIP][0]);<br>
+                             amdgpu_ip_version(adev, DCE_HWIP, 0));<br>
         }<br>
 <br>
         /* Determine whether to enable PSR support by default. */<br>
         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {<br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(3, 1, 2):<br>
                 case IP_VERSION(3, 1, 3):<br>
                 case IP_VERSION(3, 1, 4):<br>
@@ -4470,7 +4470,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)<br>
         }<br>
 <br>
         if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {<br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(3, 1, 4):<br>
                 case IP_VERSION(3, 1, 5):<br>
                 case IP_VERSION(3, 1, 6):<br>
@@ -4589,7 +4589,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)<br>
                 }<br>
                 break;<br>
         default:<br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(1, 0, 0):<br>
                 case IP_VERSION(1, 0, 1):<br>
                 case IP_VERSION(2, 0, 2):<br>
@@ -4615,7 +4615,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)<br>
                         break;<br>
                 default:<br>
                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",<br>
-                                       adev->ip_versions[DCE_HWIP][0]);<br>
+                                       amdgpu_ip_version(adev, DCE_HWIP, 0));<br>
                         goto fail;<br>
                 }<br>
                 break;<br>
@@ -4698,14 +4698,14 @@ static int dm_init_microcode(struct amdgpu_device *adev)<br>
         char *fw_name_dmub;<br>
         int r;<br>
 <br>
-       switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
         case IP_VERSION(2, 1, 0):<br>
                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;<br>
                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))<br>
                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;<br>
                 break;<br>
         case IP_VERSION(3, 0, 0):<br>
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))<br>
                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;<br>
                 else<br>
                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;<br>
@@ -4835,7 +4835,7 @@ static int dm_early_init(void *handle)<br>
                 break;<br>
         default:<br>
 <br>
-               switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
                 case IP_VERSION(2, 0, 2):<br>
                 case IP_VERSION(3, 0, 0):<br>
                         adev->mode_info.num_crtc = 6;<br>
@@ -4872,7 +4872,7 @@ static int dm_early_init(void *handle)<br>
                         break;<br>
                 default:<br>
                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",<br>
-                                       adev->ip_versions[DCE_HWIP][0]);<br>
+                                       amdgpu_ip_version(adev, DCE_HWIP, 0));<br>
                         return -EINVAL;<br>
                 }<br>
                 break;<br>
@@ -11006,7 +11006,7 @@ int amdgpu_dm_process_dmub_set_config_sync(<br>
  */<br>
 bool check_seamless_boot_capability(struct amdgpu_device *adev)<br>
 {<br>
-       switch (adev->ip_versions[DCE_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {<br>
         case IP_VERSION(3, 0, 1):<br>
                 if (!adev->mman.keep_stolen_vga_memory)<br>
                         return true;<br>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c<br>
index b97cbc4e5477..8038fe3d193e 100644<br>
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c<br>
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c<br>
@@ -226,7 +226,7 @@ static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,<br>
         tiling_info->gfx9.num_rb_per_se =<br>
                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;<br>
         tiling_info->gfx9.shaderEnable = 1;<br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))<br>
                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;<br>
 }<br>
 <br>
@@ -669,7 +669,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty<br>
         case AMDGPU_FAMILY_YC:<br>
         case AMDGPU_FAMILY_GC_10_3_6:<br>
         case AMDGPU_FAMILY_GC_10_3_7:<br>
-               if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))<br>
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))<br>
                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);<br>
                 else<br>
                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);<br>
@@ -1069,8 +1069,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,<br>
          * is to gesture the YouTube Android app into full screen<br>
          * on ChromeOS.<br>
          */<br>
-       if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||<br>
-           (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&<br>
+       if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||<br>
+           (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) &&<br>
             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&<br>
             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))<br>
                 return -EINVAL;<br>
@@ -1509,7 +1509,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,<br>
                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,<br>
                                                    supported_rotations);<br>
 <br>
-       if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&<br>
+       if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) &&<br>
             plane->type != DRM_PLANE_TYPE_CURSOR)<br>
                 drm_plane_enable_fb_damage_clips(plane);<br>
 <br>
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c<br>
index 84e1af6a6ce7..e789a48089ad 100644<br>
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c<br>
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c<br>
@@ -2024,8 +2024,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_<br>
                                uint32_t mask, enum amdgpu_device_attr_states *states)<br>
 {<br>
         struct device_attribute *dev_attr = &attr->dev_attr;<br>
-       uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];<br>
-       uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];<br>
+       uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);<br>
+       uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);<br>
         const char *attr_name = dev_attr->attr.name;<br>
 <br>
         if (!(attr->flags & mask)) {<br>
@@ -2917,7 +2917,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,<br>
                                          char *buf)<br>
 {<br>
         struct amdgpu_device *adev = dev_get_drvdata(dev);<br>
-       uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];<br>
+       uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);<br>
 <br>
         if (gc_ver == IP_VERSION(10, 3, 1))<br>
                 return sysfs_emit(buf, "%s\n",<br>
@@ -3205,7 +3205,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,<br>
         struct device *dev = kobj_to_dev(kobj);<br>
         struct amdgpu_device *adev = dev_get_drvdata(dev);<br>
         umode_t effective_mode = attr->mode;<br>
-       uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];<br>
+       uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);<br>
         uint32_t tmp;<br>
 <br>
         /* under multi-vf mode, the hwmon attributes are all not supported */<br>
@@ -4158,8 +4158,8 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,<br>
 <br>
 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)<br>
 {<br>
-       uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];<br>
-       uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];<br>
+       uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);<br>
+       uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);<br>
         uint32_t value;<br>
         uint64_t value64 = 0;<br>
         uint32_t query = 0;<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c<br>
index ed23d7de3f28..e6f1620acdd4 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c<br>
@@ -485,7 +485,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)<br>
         if (adev->asic_type == CHIP_VEGA20)<br>
                 return false;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))<br>
                 return true;<br>
 <br>
         return false;<br>
@@ -603,7 +603,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)<br>
         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)<br>
                 smu->od_enabled = true;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 5):<br>
         case IP_VERSION(11, 0, 9):<br>
@@ -775,8 +775,8 @@ static int smu_late_init(void *handle)<br>
                 }<br>
         }<br>
 <br>
-       if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||<br>
-           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))<br>
+       if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||<br>
+           (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))<br>
                 return 0;<br>
 <br>
         if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {<br>
@@ -1259,7 +1259,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)<br>
         uint64_t features_supported;<br>
         int ret = 0;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 7):<br>
         case IP_VERSION(11, 0, 11):<br>
         case IP_VERSION(11, 5, 0):<br>
@@ -1449,7 +1449,7 @@ static int smu_start_smc_engine(struct smu_context *smu)<br>
         int ret = 0;<br>
 <br>
         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {<br>
-               if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {<br>
+               if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {<br>
                         if (smu->ppt_funcs->load_microcode) {<br>
                                 ret = smu->ppt_funcs->load_microcode(smu);<br>
                                 if (ret)<br>
@@ -1549,7 +1549,7 @@ static int smu_disable_dpms(struct smu_context *smu)<br>
          * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others)<br>
          * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.<br>
          */<br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 0):<br>
         case IP_VERSION(13, 0, 7):<br>
         case IP_VERSION(13, 0, 10):<br>
@@ -1570,7 +1570,7 @@ static int smu_disable_dpms(struct smu_context *smu)<br>
          *     properly.<br>
          */<br>
         if (smu->uploading_custom_pp_table) {<br>
-               switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
                 case IP_VERSION(11, 0, 0):<br>
                 case IP_VERSION(11, 0, 5):<br>
                 case IP_VERSION(11, 0, 9):<br>
@@ -1590,7 +1590,7 @@ static int smu_disable_dpms(struct smu_context *smu)<br>
          * on BACO in. Driver involvement is unnecessary.<br>
          */<br>
         if (use_baco) {<br>
-               switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
                 case IP_VERSION(11, 0, 7):<br>
                 case IP_VERSION(11, 0, 0):<br>
                 case IP_VERSION(11, 0, 5):<br>
@@ -1607,7 +1607,7 @@ static int smu_disable_dpms(struct smu_context *smu)<br>
          * for gpu reset and S0i3 cases. Driver involvement is unnecessary.<br>
          */<br>
         if (amdgpu_in_reset(adev) || adev->in_s0ix) {<br>
-               switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
                 case IP_VERSION(13, 0, 4):<br>
                 case IP_VERSION(13, 0, 11):<br>
                         return 0;<br>
@@ -1634,7 +1634,7 @@ static int smu_disable_dpms(struct smu_context *smu)<br>
                 }<br>
         }<br>
 <br>
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&<br>
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&<br>
             !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)<br>
                 adev->gfx.rlc.funcs->stop(adev);<br>
 <br>
@@ -2391,7 +2391,7 @@ int smu_get_power_limit(void *handle,<br>
         } else {<br>
                 switch (limit_level) {<br>
                 case SMU_PPT_LIMIT_CURRENT:<br>
-                       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+                       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
                         case IP_VERSION(13, 0, 2):<br>
                         case IP_VERSION(11, 0, 7):<br>
                         case IP_VERSION(11, 0, 11):<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c<br>
index 18487ae10bcf..650482cedd1f 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c<br>
@@ -345,8 +345,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,<br>
 <br>
         /* DPM UCLK enablement should be skipped for navi10 A0 secure board */<br>
         if (!(is_asic_secure(smu) &&<br>
-            (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&<br>
-            (adev->rev_id == 0)) &&<br>
+             (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&<br>
+             (adev->rev_id == 0)) &&<br>
             (adev->pm.pp_feature & PP_MCLK_DPM_MASK))<br>
                 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)<br>
                                 | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)<br>
@@ -354,7 +354,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,<br>
 <br>
         /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */<br>
         if (is_asic_secure(smu) &&<br>
-           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&<br>
+           (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&<br>
             (adev->rev_id == 0))<br>
                 *(uint64_t *)feature_mask &=<br>
                                 ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);<br>
@@ -916,7 +916,7 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu,<br>
                 return ret;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 9):<br>
                 if (smu_version > 0x00341C00)<br>
                         ret = navi12_get_smu_metrics_data(smu, member, value);<br>
@@ -926,8 +926,12 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu,<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 5):<br>
         default:<br>
-               if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||<br>
-                     ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))<br>
+               if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                     IP_VERSION(11, 0, 5)) &&<br>
+                    smu_version > 0x00351F00) ||<br>
+                   ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                     IP_VERSION(11, 0, 0)) &&<br>
+                    smu_version > 0x002A3B00))<br>
                         ret = navi10_get_smu_metrics_data(smu, member, value);<br>
                 else<br>
                         ret = navi10_get_legacy_smu_metrics_data(smu, member, value);<br>
@@ -1712,7 +1716,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)<br>
         uint32_t sclk_freq;<br>
 <br>
         pstate_table->gfxclk_pstate.min = gfx_table->min;<br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
                 switch (adev->pdev->revision) {<br>
                 case 0xf0: /* XTX */<br>
@@ -2754,8 +2758,8 @@ static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)<br>
         if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))<br>
                 return false;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0) ||<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5))<br>
                 return true;<br>
 <br>
         return false;<br>
@@ -2863,8 +2867,10 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)<br>
          * - PPSMC_MSG_SetDriverDummyTableDramAddrLow<br>
          * - PPSMC_MSG_GetUMCFWWA<br>
          */<br>
-       if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) ||<br>
-           ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) {<br>
+       if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&<br>
+            (pmfw_version >= 0x2a3500)) ||<br>
+           ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5)) &&<br>
+            (pmfw_version >= 0x351D00))) {<br>
                 ret = smu_cmn_send_smc_msg_with_param(smu,<br>
                                                       SMU_MSG_GET_UMC_FW_WA,<br>
                                                       0,<br>
@@ -2883,13 +2889,15 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)<br>
                         return 0;<br>
 <br>
                 if (umc_fw_disable_cdr) {<br>
-                       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))<br>
+                       if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                           IP_VERSION(11, 0, 0))<br>
                                 return navi10_umc_hybrid_cdr_workaround(smu);<br>
                 } else {<br>
                         return navi10_set_dummy_pstates_table_location(smu);<br>
                 }<br>
         } else {<br>
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))<br>
+               if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                   IP_VERSION(11, 0, 0))<br>
                         return navi10_umc_hybrid_cdr_workaround(smu);<br>
         }<br>
 <br>
@@ -3356,7 +3364,7 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,<br>
                 return ret;<br>
         }<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 9):<br>
                 if (smu_version > 0x00341C00)<br>
                         ret = navi12_get_gpu_metrics(smu, table);<br>
@@ -3366,8 +3374,12 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 5):<br>
         default:<br>
-               if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||<br>
-                     ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))<br>
+               if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                     IP_VERSION(11, 0, 5)) &&<br>
+                    smu_version > 0x00351F00) ||<br>
+                   ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                     IP_VERSION(11, 0, 0)) &&<br>
+                    smu_version > 0x002A3B00))<br>
                         ret = navi10_get_gpu_metrics(smu, table);<br>
                 else<br>
                         ret = navi10_get_legacy_gpu_metrics(smu, table);<br>
@@ -3385,7 +3397,7 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)<br>
         uint32_t param = 0;<br>
 <br>
         /* Navi12 does not support this */<br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9))<br>
                 return 0;<br>
 <br>
         /*<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c<br>
index 4bb289f9b4b8..164c2264027d 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c<br>
@@ -73,12 +73,16 @@<br>
 <br>
 #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15<br>
 <br>
-#define GET_PPTABLE_MEMBER(field, member) do {\<br>
-       if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))\<br>
-               (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\<br>
-       else\<br>
-               (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\<br>
-} while(0)<br>
+#define GET_PPTABLE_MEMBER(field, member)                                    \<br>
+       do {                                                                 \<br>
+               if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==             \<br>
+                   IP_VERSION(11, 0, 13))                                   \<br>
+                       (*member) = (smu->smu_table.driver_pptable +         \<br>
+                                    offsetof(PPTable_beige_goby_t, field)); \<br>
+               else                                                         \<br>
+                       (*member) = (smu->smu_table.driver_pptable +         \<br>
+                                    offsetof(PPTable_t, field));            \<br>
+       } while (0)<br>
 <br>
 /* STB FIFO depth is in 64bit units */<br>
 #define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8<br>
@@ -91,7 +95,7 @@<br>
 <br>
 static int get_table_size(struct smu_context *smu)<br>
 {<br>
-       if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))<br>
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))<br>
                 return sizeof(PPTable_beige_goby_t);<br>
         else<br>
                 return sizeof(PPTable_t);<br>
@@ -309,7 +313,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,<br>
         }<br>
 <br>
         if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&<br>
-           (adev->ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)) &&<br>
+           (amdgpu_ip_version(adev, MP1_HWIP, 0) > IP_VERSION(11, 0, 7)) &&<br>
             !(adev->flags & AMD_IS_APU))<br>
                 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);<br>
 <br>
@@ -434,7 +438,7 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)<br>
         PPTable_beige_goby_t *ppt_beige_goby;<br>
         PPTable_t *ppt;<br>
 <br>
-       if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))<br>
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))<br>
                 ppt_beige_goby = smu->smu_table.driver_pptable;<br>
         else<br>
                 ppt = smu->smu_table.driver_pptable;<br>
@@ -447,7 +451,7 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)<br>
         if (ret)<br>
                 return ret;<br>
 <br>
-       if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))<br>
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))<br>
                 smu_memcpy_trailing(ppt_beige_goby, I2cControllers, BoardReserved,<br>
                                     smc_dpm_table, I2cControllers);<br>
         else<br>
@@ -725,7 +729,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,<br>
         uint32_t apu_percent = 0;<br>
         uint32_t dgpu_percent = 0;<br>
 <br>
-       switch (smu->adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 7):<br>
                 if (smu->smc_fw_version >= 0x3A4900)<br>
                         use_metrics_v3 = true;<br>
@@ -1385,8 +1389,9 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,<br>
                  * and onwards SMU firmwares.<br>
                  */<br>
                 smu_cmn_get_smc_version(smu, NULL, &smu_version);<br>
-               if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&<br>
-                    (smu_version < 0x003a2900))<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                    IP_VERSION(11, 0, 7)) &&<br>
+                   (smu_version < 0x003a2900))<br>
                         break;<br>
 <br>
                 size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");<br>
@@ -1494,7 +1499,7 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)<br>
         pstate_table->socclk_pstate.min = soc_table->min;<br>
         pstate_table->socclk_pstate.peak = soc_table->max;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 7):<br>
         case IP_VERSION(11, 0, 11):<br>
                 pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;<br>
@@ -1945,7 +1950,8 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,<br>
                 *size = 4;<br>
                 break;<br>
         case AMDGPU_PP_SENSOR_SS_APU_SHARE:<br>
-               if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) {<br>
+               if (amdgpu_ip_version(adev, MP1_HWIP, 0) !=<br>
+                   IP_VERSION(11, 0, 7)) {<br>
                         ret = sienna_cichlid_get_smu_metrics_data(smu,<br>
                                                 METRICS_SS_APU_SHARE, (uint32_t *)data);<br>
                         *size = 4;<br>
@@ -1954,7 +1960,8 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,<br>
                 }<br>
                 break;<br>
         case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:<br>
-               if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) {<br>
+               if (amdgpu_ip_version(adev, MP1_HWIP, 0) !=<br>
+                   IP_VERSION(11, 0, 7)) {<br>
                         ret = sienna_cichlid_get_smu_metrics_data(smu,<br>
                                                 METRICS_SS_DGPU_SHARE, (uint32_t *)data);<br>
                         *size = 4;<br>
@@ -1978,7 +1985,7 @@ static void sienna_cichlid_get_unique_id(struct smu_context *smu)<br>
 <br>
         /* Only supported as of version 0.58.83.0 and only on Sienna Cichlid */<br>
         if (smu->smc_fw_version < 0x3A5300 ||<br>
-           smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7))<br>
+           amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(11, 0, 7))<br>
                 return;<br>
 <br>
         if (sienna_cichlid_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))<br>
@@ -2148,8 +2155,8 @@ static void sienna_cichlid_dump_od_table(struct smu_context *smu,<br>
                                                         od_table->UclkFmax);<br>
 <br>
         smu_cmn_get_smc_version(smu, NULL, &smu_version);<br>
-       if (!((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&<br>
-              (smu_version < 0x003a2900)))<br>
+       if (!((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7)) &&<br>
+             (smu_version < 0x003a2900)))<br>
                 dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);<br>
 }<br>
 <br>
@@ -2381,8 +2388,9 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,<br>
                  * and onwards SMU firmwares.<br>
                  */<br>
                 smu_cmn_get_smc_version(smu, NULL, &smu_version);<br>
-               if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&<br>
-                    (smu_version < 0x003a2900)) {<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                    IP_VERSION(11, 0, 7)) &&<br>
+                   (smu_version < 0x003a2900)) {<br>
                         dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "<br>
                                                 "only by 58.41.0 and onwards SMU firmwares!\n");<br>
                         return -EOPNOTSUPP;<br>
@@ -3105,7 +3113,8 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)<br>
         PPTable_t *pptable = table_context->driver_pptable;<br>
         int i;<br>
 <br>
-       if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) {<br>
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==<br>
+           IP_VERSION(11, 0, 13)) {<br>
                 beige_goby_dump_pptable(smu);<br>
                 return;<br>
         }<br>
@@ -3910,7 +3919,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,<br>
         uint16_t average_gfx_activity;<br>
         int ret = 0;<br>
 <br>
-       switch (smu->adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 7):<br>
                 if (smu->smc_fw_version >= 0x3A4900)<br>
                         use_metrics_v3 = true;<br>
@@ -4026,8 +4035,10 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,<br>
         gpu_metrics->current_fan_speed = use_metrics_v3 ? metrics_v3->CurrFanSpeed :<br>
                 use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;<br>
 <br>
-       if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) ||<br>
-             ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) {<br>
+       if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7)) &&<br>
+            smu->smc_fw_version > 0x003A1E00) ||<br>
+           ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11)) &&<br>
+            smu->smc_fw_version > 0x00410400)) {<br>
                 gpu_metrics->pcie_link_width = use_metrics_v3 ? metrics_v3->PcieWidth :<br>
                         use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth;<br>
                 gpu_metrics->pcie_link_speed = link_speed[use_metrics_v3 ? metrics_v3->PcieRate :<br>
@@ -4253,7 +4264,7 @@ static int sienna_cichlid_get_default_config_table_settings(struct smu_context *<br>
         table->gfx_activity_average_tau = 10;<br>
         table->mem_activity_average_tau = 10;<br>
         table->socket_power_average_tau = 100;<br>
-       if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(11, 0, 7))<br>
                 table->apu_socket_power_average_tau = 100;<br>
 <br>
         return 0;<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c<br>
index aa4a5498a12f..ece43b41141c 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c<br>
@@ -101,8 +101,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)<br>
         struct amdgpu_firmware_info *ucode = NULL;<br>
 <br>
         if (amdgpu_sriov_vf(adev) &&<br>
-           ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) ||<br>
-            (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))<br>
+           ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9)) ||<br>
+            (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7))))<br>
                 return 0;<br>
 <br>
         amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));<br>
@@ -213,7 +213,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)<br>
         if (smu->is_apu)<br>
                 adev->pm.fw_version = smu_version;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
                 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;<br>
                 break;<br>
@@ -246,7 +246,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)<br>
                 break;<br>
         default:<br>
                 dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",<br>
-                       adev->ip_versions[MP1_HWIP][0]);<br>
+                       amdgpu_ip_version(adev, MP1_HWIP, 0));<br>
                 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;<br>
                 break;<br>
         }<br>
@@ -474,9 +474,10 @@ int smu_v11_0_init_power(struct smu_context *smu)<br>
 {<br>
         struct amdgpu_device *adev = smu->adev;<br>
         struct smu_power_context *smu_power = &smu->smu_power;<br>
-       size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ?<br>
-                       sizeof(struct smu_11_5_power_context) :<br>
-                       sizeof(struct smu_11_0_power_context);<br>
+       size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                                     IP_VERSION(11, 5, 0) ?<br>
+                             sizeof(struct smu_11_5_power_context) :<br>
+                             sizeof(struct smu_11_0_power_context);<br>
 <br>
         smu_power->power_context = kzalloc(size, GFP_KERNEL);<br>
         if (!smu_power->power_context)<br>
@@ -731,10 +732,10 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)<br>
         /* Navy_Flounder/Dimgrey_Cavefish do not support to change<br>
          * display num currently<br>
          */<br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))<br>
                 return 0;<br>
 <br>
         return smu_cmn_send_smc_msg_with_param(smu,<br>
@@ -1103,7 +1104,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)<br>
         int ret = 0;<br>
         struct amdgpu_device *adev = smu->adev;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(11, 0, 0):<br>
         case IP_VERSION(11, 0, 5):<br>
         case IP_VERSION(11, 0, 9):<br>
@@ -1591,7 +1592,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)<br>
                 return 0;<br>
 <br>
         if (state == SMU_BACO_STATE_ENTER) {<br>
-               switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+               switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
                 case IP_VERSION(11, 0, 7):<br>
                 case IP_VERSION(11, 0, 11):<br>
                 case IP_VERSION(11, 0, 12):<br>
@@ -1610,7 +1611,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)<br>
                 default:<br>
                         if (!ras || !adev->ras_enabled ||<br>
                             adev->gmc.xgmi.pending_reset) {<br>
-                               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {<br>
+                               if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                                   IP_VERSION(11, 0, 2)) {<br>
                                         data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);<br>
                                         data |= 0x80000000;<br>
                                         WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);<br>
@@ -1894,7 +1896,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,<br>
          * Separate MCLK and SOCCLK soft min/max settings are not allowed<br>
          * on Arcturus.<br>
          */<br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {<br>
                 mclk_min = mclk_max = 0;<br>
                 socclk_min = socclk_max = 0;<br>
         }<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c<br>
index c8119491c516..8908bbb3ff1f 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c<br>
@@ -1198,8 +1198,12 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,<br>
                 *value = metrics->AverageUvdActivity / 100;<br>
                 break;<br>
         case METRICS_CURR_SOCKETPOWER:<br>
-               if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||<br>
-               ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))<br>
+               if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                     IP_VERSION(12, 0, 1)) &&<br>
+                    (adev->pm.fw_version >= 0x40000f)) ||<br>
+                   ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==<br>
+                     IP_VERSION(12, 0, 0)) &&<br>
+                    (adev->pm.fw_version >= 0x373200)))<br>
                         *value = metrics->CurrentSocketPower << 8;<br>
                 else<br>
                         *value = (metrics->CurrentSocketPower << 8) / 1000;<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c<br>
index fd1798fd716e..d86499ac8931 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c<br>
@@ -196,9 +196,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)<br>
         if (!adev->scpm_enabled)<br>
                 return 0;<br>
 <br>
-       if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||<br>
-           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||<br>
-           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))<br>
+       if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) ||<br>
+           (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) ||<br>
+           (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)))<br>
                 return 0;<br>
 <br>
         /* override pptable_id from driver parameter */<br>
@@ -234,7 +234,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)<br>
         struct amdgpu_device *adev = smu->adev;<br>
         uint32_t mp1_fw_flags;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 4):<br>
         case IP_VERSION(13, 0, 11):<br>
                 mp1_fw_flags = RREG32_PCIE(MP1_Public |<br>
@@ -269,7 +269,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)<br>
         smu_minor = (smu_version >> 8) & 0xff;<br>
         smu_debug = (smu_version >> 0) & 0xff;<br>
         if (smu->is_apu ||<br>
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))<br>
+           amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))<br>
                 adev->pm.fw_version = smu_version;<br>
 <br>
         /* only for dGPU w/ SMU13*/<br>
@@ -802,7 +802,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)<br>
         int ret = 0;<br>
         struct amdgpu_device *adev = smu->adev;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 0):<br>
         case IP_VERSION(13, 0, 1):<br>
         case IP_VERSION(13, 0, 3):<br>
@@ -1779,7 +1779,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,<br>
          * Unset those settings for SMU 13.0.2. As soft limits settings<br>
          * for those clock domains are not supported.<br>
          */<br>
-       if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {<br>
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {<br>
                 mclk_min = mclk_max = 0;<br>
                 socclk_min = socclk_max = 0;<br>
                 vclk_min = vclk_max = 0;<br>
@@ -1926,7 +1926,7 @@ static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,<br>
 <br>
         ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);<br>
         /* SMU v13.0.2 FW returns 0 based max level, increment by one for it */<br>
-       if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value))<br>
+       if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))<br>
                 ++(*value);<br>
 <br>
         return ret;<br>
@@ -1986,7 +1986,7 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,<br>
                 return ret;<br>
         }<br>
 <br>
-       if (smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) {<br>
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {<br>
                 ret = smu_v13_0_get_fine_grained_status(smu,<br>
                                                         clk_type,<br>
                                                         &single_dpm_table->is_fine_grained);<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c<br>
index 093962a37688..a3cbe15c3123 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c<br>
@@ -2754,7 +2754,7 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)<br>
         uint32_t param;<br>
         struct amdgpu_device *adev = smu->adev;<br>
 <br>
-       switch (adev->ip_versions[MP1_HWIP][0]) {<br>
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {<br>
         case IP_VERSION(13, 0, 0):<br>
                 /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */<br>
                 smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param);<br>
@@ -2787,7 +2787,7 @@ static int smu_v13_0_0_mode2_reset(struct smu_context *smu)<br>
         int ret;<br>
         struct amdgpu_device *adev = smu->adev;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))<br>
                 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL);<br>
         else<br>
                 return -EOPNOTSUPP;<br>
@@ -2799,7 +2799,7 @@ static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu)<br>
 {<br>
         struct amdgpu_device *adev = smu->adev;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))<br>
                 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,<br>
                                                                                    FEATURE_PWR_GFX, NULL);<br>
         else<br>
@@ -2863,7 +2863,7 @@ static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)<br>
         if (ret)<br>
                 return -EOPNOTSUPP;<br>
 <br>
-       if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&<br>
+       if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)) &&<br>
                 (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))<br>
                 return ret;<br>
         else<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c<br>
index 626591f54bc4..bb98156b2fa1 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c<br>
@@ -1144,7 +1144,7 @@ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)<br>
         smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION;<br>
         smu->is_apu = true;<br>
 <br>
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))<br>
+       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 4))<br>
                 smu_v13_0_4_set_smu_mailbox_registers(smu);<br>
         else<br>
                 smu_v13_0_set_smu_mailbox_registers(smu);<br>
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c<br>
index 2e74d749efdd..2d1736234b4a 100644<br>
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c<br>
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c<br>
@@ -1024,24 +1024,24 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,<br>
         switch (clk_type) {<br>
         case SMU_GFXCLK:<br>
         case SMU_SCLK:<br>
-               if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))<br>
                         clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK;<br>
-               if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||<br>
-                       (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||<br>
+                       (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))<br>
                         clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK;<br>
                 break;<br>
         case SMU_SOCCLK:<br>
-               if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))<br>
                         clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK;<br>
-               if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||<br>
-                       (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||<br>
+                       (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))<br>
                         clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK;<br>
                 break;<br>
         case SMU_FCLK:<br>
-               if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))<br>
                         clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK;<br>
-               if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||<br>
-                       (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))<br>
+               if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||<br>
+                       (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))<br>
                         clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK;<br>
                 break;<br>
         default:<br>
-- <br>
2.25.1<br>
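<br>
A note for anyone skimming the hunks above: the conversion is purely mechanical, replacing each open-coded read of adev->ip_versions[...][0] with a call to the new accessor. The sketch below is only an illustration of what such a thin inline wrapper amounts to and of the resulting call-site pattern; it is an assumption for readability here, not the authoritative definition, which is the one introduced earlier in this patch.<br>
<br>
/* Illustrative sketch only (not the exact hunk from this series): a thin<br>
 * inline wrapper over the same ip_versions table that the '-' lines above<br>
 * used to index directly.<br>
 */<br>
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,<br>
                                         uint8_t ip, uint8_t inst)<br>
{<br>
        return adev->ip_versions[ip][inst];<br>
}<br>
<br>
/* Resulting call-site pattern, as seen throughout the hunks above: */<br>
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))<br>
        add_gfx10_3_modifiers(adev, mods, &size, &capacity);<br>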
<br>
</div>
</span></font></div>
</div>
</body>
</html>