[PATCH 19/22] drm/amdgpu: convert srbm lock to a spinlock

From: Andres Rodriguez <andresx7 at gmail.com>
Date: Tue Mar 7 23:51:11 UTC 2017
Replace adev->srbm_mutex with a spinlock, adev->srbm_lock.
Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 4 ++--
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 20 ++++++++++----------
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 22 +++++++++++-----------
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4 ++--
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4 ++--
9 files changed, 33 insertions(+), 33 deletions(-)
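
For readers skimming the diff: every hunk below applies the same mechanical substitution. A condensed before/after sketch of the pattern, with an illustrative stand-in structure (foo_device and its helpers are not from the patch; assumes <linux/spinlock.h>):

    struct foo_device {
            spinlock_t srbm_lock;           /* was: struct mutex srbm_mutex; */
    };

    static void foo_device_init(struct foo_device *fdev)
    {
            spin_lock_init(&fdev->srbm_lock);       /* was: mutex_init() */
    }

    static void foo_program_banked_reg(struct foo_device *fdev)
    {
            spin_lock(&fdev->srbm_lock);            /* was: mutex_lock() */
            /* select an SRBM bank and program banked registers here */
            spin_unlock(&fdev->srbm_lock);          /* was: mutex_unlock() */
    }

The semantic difference is that a spinlock can be taken from atomic context, at the cost that the critical section must never sleep; the hunks below largely keep only register selects and writes under the lock.
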
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 88c3176..17c777e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1362,21 +1362,21 @@ struct amdgpu_device {
struct work_struct reset_work;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
struct amdgpu_atif atif;
struct amdgpu_atcs atcs;
- struct mutex srbm_mutex;
+ spinlock_t srbm_lock;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
/* BIOS */
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5254562..a009990 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -162,30 +162,30 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
}
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
WREG32(mmSRBM_GFX_CNTL, value);
}
static void unlock_srbm(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
WREG32(mmSRBM_GFX_CNTL, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
}
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
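
Note that lock_srbm() and unlock_srbm() form a deliberately asymmetric pair: the lock is taken in one helper and released in another, so the SRBM bank selection stays in force across whatever the caller programs in between. An illustrative caller shape (mirroring the acquire_queue()/release_queue() helpers in this file; the body is a placeholder):

    static void program_hqd_sketch(struct kgd_dev *kgd)
    {
            acquire_queue(kgd, /*pipe_id*/ 0, /*queue_id*/ 0); /* takes srbm_lock */

            /* every WREG32() here targets the selected MEC/pipe/queue bank;
             * with srbm_lock now a spinlock, nothing in this span may sleep */

            release_queue(kgd);     /* resets SRBM_GFX_CNTL, drops srbm_lock */
    }

Since lock and unlock live in different functions, sparse-style annotations (__acquires()/__releases()) would be one way to keep static checkers quiet, though the patch does not add them.
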
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index db7410a..6b93a5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -123,30 +123,30 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
}
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
WREG32(mmSRBM_GFX_CNTL, value);
}
static void unlock_srbm(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
WREG32(mmSRBM_GFX_CNTL, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
}
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c706805..78bd0efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1688,21 +1688,21 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
mutex_init(&adev->vm_manager.lock);
atomic_set(&adev->irq.ih.lock, 0);
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
- mutex_init(&adev->srbm_mutex);
+ spin_lock_init(&adev->srbm_lock);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
hash_init(adev->mn_hash);
amdgpu_check_arguments(adev);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->smc_idx_lock);
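
A small but necessary detail in the hunk above: because adev is allocated at runtime, the lock needs spin_lock_init() here, mirroring the mutex_init() it replaces; the static DEFINE_SPINLOCK() initializer only works for globals. For comparison (sketch, names illustrative):

    static DEFINE_SPINLOCK(some_global_lock);   /* compile-time init, globals only */

    struct bar {
            spinlock_t lock;
    };

    static void bar_init(struct bar *b)
    {
            spin_lock_init(&b->lock);   /* runtime init for embedded locks */
    }
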
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 810bba5..83e92df 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -383,30 +383,30 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 wb_offset;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
for (j = 0; j < 16; j++) {
cik_srbm_select(adev, 0, 0, 0, j);
/* SDMA GFX */
WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
/* XXX SDMA RLC - todo */
}
cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
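
The SDMA and VMID-setup hunks all share the loop shape above: take the lock, select each VMID bank in turn, program its registers, and always restore bank 0 before unlocking so later accesses hit the default bank. A generic sketch (srbm_select() stands in for cik_srbm_select()/vi_srbm_select(); the writes are placeholders):

    spin_lock(&adev->srbm_lock);
    for (vmid = 0; vmid < 16; vmid++) {
            srbm_select(adev, 0, 0, 0, vmid); /* switch banked registers to vmid */
            /* program per-VMID state, e.g. GFX_VIRTUAL_ADDR / APE1_CNTL */
    }
    srbm_select(adev, 0, 0, 0, 0);  /* restore VMID 0 before dropping the lock */
    spin_unlock(&adev->srbm_lock);
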
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index b0b0c89..0579f0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1858,31 +1858,31 @@ static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
/*
* Configure apertures:
* LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
* Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
* GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
*/
sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
cik_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
}
cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
}
/**
* gfx_v7_0_gpu_init - setup the 3D engine
*
* @adev: amdgpu_device pointer
*
* Configures the 3D engine and tiling configuration
* registers so that the 3D engine is usable.
*/
@@ -1912,31 +1912,31 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
for (i = 0; i < 16; i++) {
cik_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, 0);
}
cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
gmc_v7_0_init_compute_vmid(adev);
WREG32(mmSX_DEBUG_1, 0x20);
WREG32(mmTA_CNTL_AUX, 0x00010000);
tmp = RREG32(mmSPI_CONFIG_CNTL);
tmp |= 0x03000000;
WREG32(mmSPI_CONFIG_CNTL, tmp);
@@ -2940,40 +2940,40 @@ struct hqd_registers
u32 cp_hqd_hq_scheduler1;
u32 cp_mqd_control;
};
static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev, int me, int pipe)
{
u64 eop_gpu_addr;
u32 tmp;
size_t eop_offset = me * pipe * GFX7_MEC_HPD_SIZE * 2;
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
cik_srbm_select(adev, me, pipe, 0, 0);
/* write the EOP addr */
WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
/* set the VMID assigned */
WREG32(mmCP_HPD_EOP_VMID, 0);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32(mmCP_HPD_EOP_CONTROL);
tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
WREG32(mmCP_HPD_EOP_CONTROL, tmp);
cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
}
static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
{
int i;
/* disable the queue if it's active */
if (RREG32(mmCP_HQD_ACTIVE) & 1) {
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -3188,29 +3188,29 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
if (r) {
dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
goto out_unreserve;
}
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
if (r) {
dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
goto out_unreserve;
}
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
gfx_v7_0_mqd_deactivate(adev);
gfx_v7_0_mqd_commit(adev, mqd);
cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
amdgpu_bo_kunmap(ring->mqd_obj);
out_unreserve:
amdgpu_bo_unreserve(ring->mqd_obj);
out:
return 0;
}
/**
* gfx_v7_0_cp_compute_resume - setup the compute queue registers
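
Worth noting in gfx_v7_0_compute_queue_init() above: the operations that can sleep (amdgpu_bo_reserve(), amdgpu_bo_kmap() and their cleanup) sit outside the lock, and only SRBM selection plus register programming happens inside, which becomes a hard requirement once srbm_lock is a spinlock. Condensed shape (map_mqd()/program_mqd()/unmap_mqd() are placeholders, not real helpers):

    mqd = map_mqd(ring);            /* may sleep: BO reserve + kmap */

    spin_lock(&adev->srbm_lock);    /* atomic from here on */
    cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
    program_mqd(adev, mqd);         /* register writes only; must not sleep */
    cik_srbm_select(adev, 0, 0, 0, 0);
    spin_unlock(&adev->srbm_lock);

    unmap_mqd(ring);                /* may sleep again once the lock is dropped */
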
@@ -5062,28 +5062,28 @@ static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
DRM_ERROR("Ignoring request to enable interrupts for invalid me:%d\n", me);
return;
}
if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
DRM_ERROR("Ignoring request to enable interrupts for invalid "
"me:%d pipe:%d\n", pipe, me);
return;
}
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
cik_srbm_select(adev, me, pipe, 0, 0);
WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
}
static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 cp_int_cntl;
switch (state) {
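
One point a reviewer might probe in the interrupt-state path above: plain spin_lock() is only safe if srbm_lock is never taken from hard-IRQ context; if it ever were, the same lock would need the IRQ-disabling variant everywhere to avoid self-deadlock. For comparison, that variant would look like this (a hypothetical, not what the patch does):

    unsigned long flags;

    spin_lock_irqsave(&adev->srbm_lock, flags);     /* also masks local IRQs */
    cik_srbm_select(adev, me, pipe, 0, 0);
    /* ... banked register writes ... */
    cik_srbm_select(adev, 0, 0, 0, 0);
    spin_unlock_irqrestore(&adev->srbm_lock, flags);
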
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5db5bac..547c172 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3898,50 +3898,50 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
*/
sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
SH_MEM_CONFIG__PRIVATE_ATC_MASK;
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
}
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
}
static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
{
u32 tmp;
int i;
WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
gfx_v8_0_tiling_mode_table_init(adev);
gfx_v8_0_setup_rb(adev);
gfx_v8_0_get_cu_info(adev);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
for (i = 0; i < 16; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32(mmSH_MEM_CONFIG, tmp);
} else {
@@ -3950,21 +3950,21 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32(mmSH_MEM_CONFIG, tmp);
}
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, 0);
}
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
gfx_v8_0_init_compute_vmid(adev);
mutex_lock(&adev->grbm_idx_mutex);
/*
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -5019,49 +5019,49 @@ static int gfx_v8_0_kiq_queue_init(struct amdgpu_ring *ring,
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
is_kiq = true;
if (is_kiq) {
eop_gpu_addr = kiq->eop_gpu_addr;
gfx_v8_0_kiq_setting(&kiq->ring);
} else
eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
ring->queue * GFX8_MEC_HPD_SIZE;
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(adev, mqd, mqd_gpu_addr, eop_gpu_addr, ring);
if (is_kiq) {
r = gfx_v8_0_mqd_deactivate(adev);
if (r) {
dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
goto out_unlock;
}
gfx_v8_0_enable_doorbell(adev, ring->use_doorbell);
gfx_v8_0_mqd_commit(adev, mqd);
}
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
if (is_kiq)
gfx_v8_0_kiq_enable(ring);
else
gfx_v8_0_map_queue_enable(&kiq->ring, ring);
return 0;
out_unlock:
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
return r;
}
static void gfx_v8_0_kiq_free_queue(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
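
The out_unlock label in gfx_v8_0_kiq_queue_init() above is the usual goto-unwind idiom, with one wrinkle: both the success path and the error path must restore SRBM bank 0 before releasing the lock, so the select(0)/unlock pair appears twice. Condensed shape (do_setup() is a placeholder for the MQD deactivate/doorbell/commit calls):

    spin_lock(&adev->srbm_lock);
    vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
    r = do_setup();
    if (r)
            goto out_unlock;
    vi_srbm_select(adev, 0, 0, 0, 0);   /* success: restore bank 0... */
    spin_unlock(&adev->srbm_lock);      /* ...and drop the lock */
    return 0;

    out_unlock:
            vi_srbm_select(adev, 0, 0, 0, 0);   /* same unwind on the error path */
            spin_unlock(&adev->srbm_lock);
            return r;
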
@@ -5178,37 +5178,37 @@ static int gfx_v8_0_compute_queue_init(struct amdgpu_device *adev,
dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
goto out_unreserve;
}
eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX8_MEC_HPD_SIZE);
eop_gpu_addr >>= 8;
/* init the mqd struct */
memset(mqd, 0, sizeof(struct vi_mqd));
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(adev, mqd, mqd_gpu_addr, eop_gpu_addr, ring);
r = gfx_v8_0_mqd_deactivate(adev);
if (r) {
dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
goto out_unlock;
}
gfx_v8_0_enable_doorbell(adev, ring->use_doorbell);
gfx_v8_0_mqd_commit(adev, mqd);
out_unlock:
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
amdgpu_bo_kunmap(ring->mqd_obj);
out_unreserve:
amdgpu_bo_unreserve(ring->mqd_obj);
out:
return r;
}
static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
{
@@ -6808,28 +6808,28 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
DRM_ERROR("Ignoring request to enable interrupts for invalid me:%d\n", me);
return;
}
if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
DRM_ERROR("Ignoring request to enable interrupts for invalid "
"me:%d pipe:%d\n", pipe, me);
return;
}
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
vi_srbm_select(adev, me, pipe, 0, 0);
WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
}
static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 896be64..3d511e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -417,29 +417,29 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 wb_offset;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
/* SDMA GFX */
WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
}
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 31375bd..f4fa136 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -610,29 +610,29 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- mutex_lock(&adev->srbm_mutex);
+ spin_lock(&adev->srbm_lock);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
/* SDMA GFX */
WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
}
vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ spin_unlock(&adev->srbm_lock);
WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
--
2.9.3