[PATCH 2/2] drm/amdkfd: Use better name for sdma queue non HWS path

Liu, Shaoyun Shaoyun.Liu at amd.com
Mon Sep 23 14:27:56 UTC 2019


Perhaps rename it to sdma_rlc to avoid confusion with the RLC block used
elsewhere in the amdgpu driver.

Regards

shaoyun.liu

On 2019-09-22 11:56 p.m., Zhao, Yong wrote:
> The old name is prone to confusion. The register offset is for an RLC queue
> rather than an SDMA engine. The value is not a base address, but a
> register offset.
>
> Change-Id: I55fb835f2105392344b1c17323bb55c03f927836
> Signed-off-by: Yong Zhao <Yong.Zhao at amd.com>
> ---
>   .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c   | 85 +++++++++---------
>   .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c    | 90 +++++++++----------
>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 10 +--
>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 10 +--
>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 86 +++++++++---------
>   5 files changed, 137 insertions(+), 144 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
> index c9ce1516956e..d2c0666c2798 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
> @@ -70,11 +70,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
>   	return (struct v9_sdma_mqd *)mqd;
>   }
>   
> -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
> +static uint32_t get_rlc_reg_offset(struct amdgpu_device *adev,
>   				unsigned int engine_id,
>   				unsigned int queue_id)
>   {
> -	uint32_t base[8] = {
> +	uint32_t sdma_engine_reg_base[8] = {
>   		SOC15_REG_OFFSET(SDMA0, 0,
>   				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
>   		SOC15_REG_OFFSET(SDMA1, 0,
> @@ -92,12 +92,11 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
>   		SOC15_REG_OFFSET(SDMA7, 0,
>   				 mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
>   	};
> -	uint32_t retval;
>   
> -	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
> -					       mmSDMA0_RLC0_RB_CNTL);
> +	uint32_t retval = sdma_engine_reg_base[engine_id]
> +		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
>   
> -	pr_debug("sdma base address: 0x%x\n", retval);
> +	pr_debug("RLC register offset: 0x%x\n", retval);
>   
>   	return retval;
>   }
> @@ -107,22 +106,22 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v9_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	unsigned long end_jiffies;
>   	uint32_t data;
>   	uint64_t data64;
>   	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
>   		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
>   
>   	end_jiffies = msecs_to_jiffies(2000) + jiffies;
>   	while (true) {
> -		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
> +		data = RREG32(rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
>   		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
>   			break;
>   		if (time_after(jiffies, end_jiffies))
> @@ -130,41 +129,41 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   		usleep_range(500, 1000);
>   	}
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
>   	       m->sdmax_rlcx_doorbell_offset);
>   
>   	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
>   			     ENABLE, 1);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
>   				m->sdmax_rlcx_rb_rptr_hi);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
>   	if (read_user_wptr(mm, wptr64, data64)) {
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
>   		       lower_32_bits(data64));
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
>   		       upper_32_bits(data64));
>   	} else {
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
>   		       m->sdmax_rlcx_rb_rptr);
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
>   		       m->sdmax_rlcx_rb_rptr_hi);
>   	}
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
>   			m->sdmax_rlcx_rb_base_hi);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
>   			m->sdmax_rlcx_rb_rptr_addr_lo);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
>   			m->sdmax_rlcx_rb_rptr_addr_hi);
>   
>   	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
>   			     RB_ENABLE, 1);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
>   
>   	return 0;
>   }
> @@ -174,7 +173,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
>   			     uint32_t (**dump)[2], uint32_t *n_regs)
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
> -	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
> +	uint32_t rlc_reg_offset = get_rlc_reg_offset(adev, engine_id, queue_id);
>   	uint32_t i = 0, reg;
>   #undef HQD_N_REGS
>   #define HQD_N_REGS (19+6+7+10)
> @@ -184,15 +183,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
>   		return -ENOMEM;
>   
>   	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
>   	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
>   	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   
>   	WARN_ON_ONCE(i != HQD_N_REGS);
>   	*n_regs = i;
> @@ -204,14 +203,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v9_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	uint32_t sdma_rlc_rb_cntl;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
> +	sdma_rlc_rb_cntl = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
>   
>   	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
>   		return true;
> @@ -224,20 +223,20 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v9_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	uint32_t temp;
>   	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
> +	temp = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
>   	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
>   
>   	while (true) {
> -		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
> +		temp = RREG32(rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
>   		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
>   			break;
>   		if (time_after(jiffies, end_jiffies))
> @@ -245,14 +244,14 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   		usleep_range(500, 1000);
>   	}
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
> -		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
> +		RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
>   		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
>   
> -	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
> +	m->sdmax_rlcx_rb_rptr = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
>   	m->sdmax_rlcx_rb_rptr_hi =
> -		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
> +		RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
>   
>   	return 0;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
> index a4325db8d093..ee520ad90717 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
> @@ -307,11 +307,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
>   	return 0;
>   }
>   
> -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
> +static uint32_t get_rlc_reg_offset(struct amdgpu_device *adev,
>   				unsigned int engine_id,
>   				unsigned int queue_id)
>   {
> -	uint32_t base[2] = {
> +	uint32_t sdma_engine_reg_base[2] = {
>   		SOC15_REG_OFFSET(SDMA0, 0,
>   				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
>   		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
> @@ -323,12 +323,11 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
>   		SOC15_REG_OFFSET(SDMA1, 0,
>   				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
>   	};
> -	uint32_t retval;
>   
> -	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
> -					       mmSDMA0_RLC0_RB_CNTL);
> +	uint32_t retval = sdma_engine_reg_base[engine_id]
> +		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
>   
> -	pr_debug("sdma base address: 0x%x\n", retval);
> +	pr_debug("RLC register offset: 0x%x\n", retval);
>   
>   	return retval;
>   }
> @@ -489,23 +488,23 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v10_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	unsigned long end_jiffies;
>   	uint32_t data;
>   	uint64_t data64;
>   	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
> -	pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
> +	pr_debug("sdma load base addr 0x%x for engine %d, queue %d\n", rlc_reg_offset, m->sdma_engine_id, m->sdma_queue_id);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
>   		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
>   
>   	end_jiffies = msecs_to_jiffies(2000) + jiffies;
>   	while (true) {
> -		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
> +		data = RREG32(rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
>   		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
>   			break;
>   		if (time_after(jiffies, end_jiffies))
> @@ -513,41 +512,41 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   		usleep_range(500, 1000);
>   	}
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
>   	       m->sdmax_rlcx_doorbell_offset);
>   
>   	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
>   			     ENABLE, 1);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
>   				m->sdmax_rlcx_rb_rptr_hi);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
>   	if (read_user_wptr(mm, wptr64, data64)) {
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
>   		       lower_32_bits(data64));
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
>   		       upper_32_bits(data64));
>   	} else {
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
>   		       m->sdmax_rlcx_rb_rptr);
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
>   		       m->sdmax_rlcx_rb_rptr_hi);
>   	}
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
>   			m->sdmax_rlcx_rb_base_hi);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
>   			m->sdmax_rlcx_rb_rptr_addr_lo);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
>   			m->sdmax_rlcx_rb_rptr_addr_hi);
>   
>   	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
>   			     RB_ENABLE, 1);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
>   
>   	return 0;
>   }
> @@ -557,28 +556,25 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
>   			     uint32_t (**dump)[2], uint32_t *n_regs)
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
> -	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
> +	uint32_t rlc_reg_offset = get_rlc_reg_offset(adev, engine_id, queue_id);
>   	uint32_t i = 0, reg;
>   #undef HQD_N_REGS
>   #define HQD_N_REGS (19+6+7+10)
>   
> -	pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
> -	pr_debug("sdma base addr %x\n", sdma_base_addr);
> -
>   	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
>   	if (*dump == NULL)
>   		return -ENOMEM;
>   
>   	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
>   	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
>   	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   
>   	WARN_ON_ONCE(i != HQD_N_REGS);
>   	*n_regs = i;
> @@ -612,14 +608,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v10_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	uint32_t sdma_rlc_rb_cntl;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
> +	sdma_rlc_rb_cntl = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
>   
>   	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
>   		return true;
> @@ -740,20 +736,20 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v10_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	uint32_t temp;
>   	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
> +	temp = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
>   	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
>   
>   	while (true) {
> -		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
> +		temp = RREG32(rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
>   		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
>   			break;
>   		if (time_after(jiffies, end_jiffies))
> @@ -761,14 +757,14 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   		usleep_range(500, 1000);
>   	}
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
> -		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
> +		RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
>   		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
>   
> -	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
> +	m->sdmax_rlcx_rb_rptr = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
>   	m->sdmax_rlcx_rb_rptr_hi =
> -		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
> +		RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
>   
>   	return 0;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
> index c6abcf72e822..d0517b7ae089 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
> @@ -303,14 +303,14 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
>   	return 0;
>   }
>   
> -static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
> +static inline uint32_t get_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
>   {
>   	uint32_t retval;
>   
>   	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
>   			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
>   
> -	pr_debug("sdma base address: 0x%x\n", retval);
> +	pr_debug("RLC register offset: 0x%x\n", retval);
>   
>   	return retval;
>   }
> @@ -417,7 +417,7 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   	uint32_t data;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(m);
> +	sdma_base_addr = get_rlc_reg_offset(m);
>   
>   	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
>   		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
> @@ -528,7 +528,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
>   	uint32_t sdma_rlc_rb_cntl;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(m);
> +	sdma_base_addr = get_rlc_reg_offset(m);
>   
>   	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
>   
> @@ -650,7 +650,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(m);
> +	sdma_base_addr = get_rlc_reg_offset(m);
>   
>   	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
>   	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
> index 4e8b4e949926..373501abdb6b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
> @@ -260,13 +260,13 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
>   	return 0;
>   }
>   
> -static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
> +static inline uint32_t get_rlc_reg_offset(struct vi_sdma_mqd *m)
>   {
>   	uint32_t retval;
>   
>   	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
>   		m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
> -	pr_debug("sdma base address: 0x%x\n", retval);
> +	pr_debug("RLC register offset: 0x%x\n", retval);
>   
>   	return retval;
>   }
> @@ -402,7 +402,7 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   	uint32_t data;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(m);
> +	sdma_base_addr = get_rlc_reg_offset(m);
>   	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
>   		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
>   
> @@ -521,7 +521,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
>   	uint32_t sdma_rlc_rb_cntl;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(m);
> +	sdma_base_addr = get_rlc_reg_offset(m);
>   
>   	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
>   
> @@ -646,7 +646,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(m);
> +	sdma_base_addr = get_rlc_reg_offset(m);
>   
>   	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
>   	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
> index 55437f160a72..2dd5bc676029 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
> @@ -226,22 +226,20 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
>   	return 0;
>   }
>   
> -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
> +static uint32_t get_rlc_reg_offset(struct amdgpu_device *adev,
>   				unsigned int engine_id,
>   				unsigned int queue_id)
>   {
> -	uint32_t base[2] = {
> +	uint32_t sdma_engine_reg_base[2] = {
>   		SOC15_REG_OFFSET(SDMA0, 0,
>   				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
>   		SOC15_REG_OFFSET(SDMA1, 0,
>   				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
>   	};
> -	uint32_t retval;
> +	uint32_t retval = sdma_engine_reg_base[engine_id]
> +		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
>   
> -	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
> -					       mmSDMA0_RLC0_RB_CNTL);
> -
> -	pr_debug("sdma base address: 0x%x\n", retval);
> +	pr_debug("RLC register offset: 0x%x\n", retval);
>   
>   	return retval;
>   }
> @@ -388,22 +386,22 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v9_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	unsigned long end_jiffies;
>   	uint32_t data;
>   	uint64_t data64;
>   	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
>   		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
>   
>   	end_jiffies = msecs_to_jiffies(2000) + jiffies;
>   	while (true) {
> -		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
> +		data = RREG32(rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
>   		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
>   			break;
>   		if (time_after(jiffies, end_jiffies))
> @@ -411,41 +409,41 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
>   		usleep_range(500, 1000);
>   	}
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
>   	       m->sdmax_rlcx_doorbell_offset);
>   
>   	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
>   			     ENABLE, 1);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
>   				m->sdmax_rlcx_rb_rptr_hi);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
>   	if (read_user_wptr(mm, wptr64, data64)) {
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
>   		       lower_32_bits(data64));
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
>   		       upper_32_bits(data64));
>   	} else {
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
>   		       m->sdmax_rlcx_rb_rptr);
> -		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
> +		WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
>   		       m->sdmax_rlcx_rb_rptr_hi);
>   	}
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
>   			m->sdmax_rlcx_rb_base_hi);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
>   			m->sdmax_rlcx_rb_rptr_addr_lo);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
>   			m->sdmax_rlcx_rb_rptr_addr_hi);
>   
>   	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
>   			     RB_ENABLE, 1);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
>   
>   	return 0;
>   }
> @@ -455,7 +453,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
>   			     uint32_t (**dump)[2], uint32_t *n_regs)
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
> -	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
> +	uint32_t rlc_reg_offset = get_rlc_reg_offset(adev, engine_id, queue_id);
>   	uint32_t i = 0, reg;
>   #undef HQD_N_REGS
>   #define HQD_N_REGS (19+6+7+10)
> @@ -465,15 +463,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
>   		return -ENOMEM;
>   
>   	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
>   	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
>   	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
> -		DUMP_REG(sdma_base_addr + reg);
> +		DUMP_REG(rlc_reg_offset + reg);
>   
>   	WARN_ON_ONCE(i != HQD_N_REGS);
>   	*n_regs = i;
> @@ -507,14 +505,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v9_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	uint32_t sdma_rlc_rb_cntl;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
> +	sdma_rlc_rb_cntl = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
>   
>   	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
>   		return true;
> @@ -577,20 +575,20 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   {
>   	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>   	struct v9_sdma_mqd *m;
> -	uint32_t sdma_base_addr;
> +	uint32_t rlc_reg_offset;
>   	uint32_t temp;
>   	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
>   
>   	m = get_sdma_mqd(mqd);
> -	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
> +	rlc_reg_offset = get_rlc_reg_offset(adev, m->sdma_engine_id,
>   					    m->sdma_queue_id);
>   
> -	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
> +	temp = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
>   	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
>   
>   	while (true) {
> -		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
> +		temp = RREG32(rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
>   		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
>   			break;
>   		if (time_after(jiffies, end_jiffies))
> @@ -598,14 +596,14 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
>   		usleep_range(500, 1000);
>   	}
>   
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
> -	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
> -		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
> +	WREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
> +		RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
>   		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
>   
> -	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
> +	m->sdmax_rlcx_rb_rptr = RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
>   	m->sdmax_rlcx_rb_rptr_hi =
> -		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
> +		RREG32(rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
>   
>   	return 0;
>   }


More information about the amd-gfx mailing list