[PATCH 11/12] drm/amd: add ACP suspend/resume functionality
Alex Deucher
alexdeucher at gmail.com
Thu Jul 9 09:21:10 PDT 2015
From: Maruthi Srinivas Bayyavarapu <Maruthi.Bayyavarapu at amd.com>
The ACP IP block can be powered on and off during a system-wide
suspend/resume transition. The AMD ASoC PCM device will use this module
during system suspend/resume and for the PCM device's runtime PM.
Code comments are also updated.
Signed-off-by: Maruthi Bayyavarapu <maruthi.bayyavarapu at amd.com>
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
Reviewed-by: Murali Krishna Vemuri <murali-krishna.vemuri at amd.com>
Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
---
drivers/gpu/drm/amd/acp/acp_hw.c | 220 +++++++++++++++++++++++-------
drivers/gpu/drm/amd/acp/acp_hw.h | 19 +++
drivers/gpu/drm/amd/acp/include/amd_acp.h | 18 +++
3 files changed, 204 insertions(+), 53 deletions(-)
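
For context (not part of the patch itself), the sketch below shows one way an
ASoC PCM platform driver could hook the new acp_suspend/acp_resume callbacks
into its dev_pm_ops so the ACP tiles are powered down across system suspend
and runtime PM idle. The acp_pcm_drvdata structure and the acp_pcm_* function
names are hypothetical placeholders, not code from this series.

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#include "amd_acp.h"

/* hypothetical driver-private data holding the amd_acp_device handle
 * provided by the ACP layer */
struct acp_pcm_drvdata {
	struct amd_acp_device *acp_dev;
};

static int acp_pcm_suspend(struct device *dev)
{
	struct acp_pcm_drvdata *drvdata = dev_get_drvdata(dev);

	/* power down the ACP P1/P2 tiles while the PCM device is idle */
	drvdata->acp_dev->acp_suspend(drvdata->acp_dev);
	return 0;
}

static int acp_pcm_resume(struct device *dev)
{
	struct acp_pcm_drvdata *drvdata = dev_get_drvdata(dev);

	/* power the tiles back up; acp_resume() re-runs the ACP init sequence */
	drvdata->acp_dev->acp_resume(drvdata->acp_dev);
	return 0;
}

static const struct dev_pm_ops acp_pcm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(acp_pcm_suspend, acp_pcm_resume)
	SET_RUNTIME_PM_OPS(acp_pcm_suspend, acp_pcm_resume, NULL)
};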
diff --git a/drivers/gpu/drm/amd/acp/acp_hw.c b/drivers/gpu/drm/amd/acp/acp_hw.c
index 0a07774..94b2c56 100644
--- a/drivers/gpu/drm/amd/acp/acp_hw.c
+++ b/drivers/gpu/drm/amd/acp/acp_hw.c
@@ -40,17 +40,16 @@
#include <linux/delay.h>
#include <linux/errno.h>
-#define VISLANDS30_IV_SRCID_ACP 0x000000a2
-
#include "acp_gfx_if.h"
#include "acp_hw.h"
#include "acp_2_2_d.h"
#include "acp_2_2_sh_mask.h"
+#define VISLANDS30_IV_SRCID_ACP 0x000000a2
+
/* Configure a given dma channel parameters - enable/disble,
* number of descriptors, priority */
-
static void config_acp_dma_channel(struct amd_acp_device *acp_dev, u8 ch_num,
u16 dscr_strt_idx, u16 num_dscrs,
enum acp_dma_priority_level priority_level)
@@ -58,38 +57,30 @@ static void config_acp_dma_channel(struct amd_acp_device *acp_dev, u8 ch_num,
u32 dma_ctrl;
struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
- /* read the dma control register and disable the channel run field */
+ /* disable the channel run field */
dma_ctrl = cgs_read_register(acp_prv->cgs_device,
mmACP_DMA_CNTL_0 + ch_num);
- /* clear the dma channel control bits */
dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRun_MASK;
-
cgs_write_register(acp_prv->cgs_device, (mmACP_DMA_CNTL_0 + ch_num),
dma_ctrl);
- /* there is no transfer happening on this channel so
- * program DMAChDscrStrIdx to the index number of the first descriptor
- * to be processed.
- */
+ /* program a DMA channel with the first descriptor to be processed. */
cgs_write_register(acp_prv->cgs_device,
(mmACP_DMA_DSCR_STRT_IDX_0 + ch_num),
(ACP_DMA_DSCR_STRT_IDX_0__DMAChDscrStrtIdx_MASK &
dscr_strt_idx));
- /* program DMAChDscrDscrCnt to the number of descriptors to be
- * processed in the transfer
- */
+ /* program a DMA channel with the number of descriptors to be
+ * processed in the transfer */
cgs_write_register(acp_prv->cgs_device,
(mmACP_DMA_DSCR_CNT_0 + ch_num),
(ACP_DMA_DSCR_CNT_0__DMAChDscrCnt_MASK & num_dscrs));
- /* set DMAChPrioLvl according to the priority */
+ /* set DMA channel priority */
cgs_write_register(acp_prv->cgs_device, (mmACP_DMA_PRIO_0 + ch_num),
priority_level);
}
-
-
/* Initialize the dma descriptors location in SRAM and page size */
static void acp_dma_descr_init(struct amd_acp_private *acp_prv)
{
@@ -143,7 +134,9 @@ static void config_dma_descriptor_in_sram(struct amd_acp_device *acp_dev,
descr_info->size_xfer_dir.val);
}
-/* Initialize the DMA descriptor information */
+/* Initialize the DMA descriptor information for transfer between
+ * system memory <-> ACP SRAM
+ */
static void set_acp_sysmem_dma_descriptors(struct amd_acp_device *acp_dev,
u32 size, int direction,
u32 pte_offset)
@@ -215,7 +208,9 @@ static void set_acp_sysmem_dma_descriptors(struct amd_acp_device *acp_dev,
}
}
-/* Initialize the i2s dma descriptors in SRAM */
+/* Initialize the DMA descriptor information for transfer between
+ * ACP SRAM <-> I2S
+ */
static void set_acp_to_i2s_dma_descriptors(struct amd_acp_device *acp_dev,
u32 size, int direction)
{
@@ -226,9 +221,6 @@ static void set_acp_to_i2s_dma_descriptors(struct amd_acp_device *acp_dev,
num_descr = 2;
- /* Let I2s Know the direction of transfer and source/destination
- * of data
- */
dmadscr[0].size_xfer_dir.val = (u32) 0x0;
if (direction == STREAM_PLAYBACK) {
dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH13;
@@ -304,7 +296,7 @@ static u16 get_dscr_idx(struct amd_acp_device *acp_dev, int direction)
}
-/* Create page table entries in ACP SRAM for the allocated memory */
+/* Create page table entries in ACP SRAM for the allocated memory */
static void acp_pte_config(struct amd_acp_device *acp_dev, struct page *pg,
u16 num_of_pages, u32 pte_offset)
{
@@ -344,7 +336,6 @@ static void acp_pte_config(struct amd_acp_device *acp_dev, struct page *pg,
}
}
-
/* enables/disables ACP's external interrupt */
static void acp_enable_external_interrupts(struct amd_acp_device *acp_dev,
int enable)
@@ -361,8 +352,8 @@ static void acp_enable_external_interrupts(struct amd_acp_device *acp_dev,
mmACP_EXTERNAL_INTR_ENB, acp_ext_intr_enb);
}
-/* Clear (acknowledge) DMA 'Interrupt on Complete' (IOC) in ACP
- * external interrupt status register
+/* Clear (acknowledge) DMA 'Interrupt on Complete' (IOC) in ACP
+ * external interrupt status register
*/
static void acp_ext_stat_clear_dmaioc(struct amd_acp_device *acp_dev, u8 ch_num)
{
@@ -382,7 +373,7 @@ static void acp_ext_stat_clear_dmaioc(struct amd_acp_device *acp_dev, u8 ch_num)
}
}
-/* Check whether interrupt (IOC) is generated or not */
+/* Check whether ACP DMA interrupt (IOC) is generated or not */
static u16 acp_get_intr_flag(struct amd_acp_device *acp_dev)
{
u32 ext_intr_status;
@@ -412,7 +403,6 @@ static int irq_set_source(void *private_data, unsigned src_id, unsigned type,
}
}
-
static inline void i2s_clear_irqs(struct amd_acp_device *acp_dev,
int direction)
{
@@ -516,17 +506,17 @@ static void config_acp_dma(struct amd_acp_device *acp_dev,
acp_pte_config(acp_dev, dma_config->pg, dma_config->num_of_pages,
pte_offset);
- /* Configure System memory to acp dma descriptors */
+ /* Configure System memory <-> ACP SRAM DMA descriptors */
set_acp_sysmem_dma_descriptors(acp_dev, dma_config->size,
dma_config->direction,
pte_offset);
- /* Configure acp to i2s dma descriptors */
+ /* Configure ACP SRAM <-> I2S DMA descriptors */
set_acp_to_i2s_dma_descriptors(acp_dev, dma_config->size,
dma_config->direction);
}
-/* Start a given dma channel */
+/* Start a given DMA channel transfer */
static int acp_dma_start(struct amd_acp_device *acp_dev,
u16 ch_num, bool is_circular)
{
@@ -572,7 +562,7 @@ static int acp_dma_start(struct amd_acp_device *acp_dev,
return status;
}
-/* Stop a given dma channel number*/
+/* Stop a given DMA channel transfer */
static int acp_dma_stop(struct amd_acp_device *acp_dev, u8 ch_num)
{
int status = STATUS_UNSUCCESSFUL;
@@ -584,7 +574,6 @@ static int acp_dma_stop(struct amd_acp_device *acp_dev, u8 ch_num)
if (acp_dev == NULL)
return status;
- /* register mask value to check the channel status bits */
dma_ctrl = cgs_read_register(acp_prv->cgs_device,
mmACP_DMA_CNTL_0 + ch_num);
@@ -636,6 +625,7 @@ static int acp_dma_stop(struct amd_acp_device *acp_dev, u8 ch_num)
return status;
}
+/* ACP DMA irq handler routine for playback and capture use cases */
static int dma_irq_handler(void *prv_data)
{
u16 play_acp_i2s_intr, cap_i2s_acp_intr, cap_acp_sysram_intr;
@@ -655,7 +645,7 @@ static int dma_irq_handler(void *prv_data)
if (!play_acp_i2s_intr && !cap_i2s_acp_intr && !cap_acp_sysram_intr) {
/* We registered for DMA Interrupt-On-Complete interrupts only.
- * If we hit here, log, acknowledge it and return. */
+ * If we hit here, log it and return. */
ext_intr_status = cgs_read_register(acp_prv->cgs_device,
mmACP_EXTERNAL_INTR_STAT);
pr_info("ACP: Not a DMA IOC irq: %x\n", ext_intr_status);
@@ -702,16 +692,92 @@ static int irq_handler(void *private_data, unsigned src_id,
return -1;
}
+/* power off a tile/block within ACP */
+static void acp_suspend_tile(struct amd_acp_private *acp_prv, int tile)
+{
+ u32 val = 0;
+ u32 timeout = 0;
+
+ if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+ pr_err(" %s : Invalid ACP power tile index\n", __func__);
+ return;
+ }
+
+ val = cgs_read_register(acp_prv->cgs_device,
+ mmACP_PGFSM_READ_REG_0 + tile);
+ val &= ACP_TILE_ON_MASK;
+
+ if (val == 0x0) {
+ val = cgs_read_register(acp_prv->cgs_device,
+ mmACP_PGFSM_RETAIN_REG);
+ val = val | (1 << tile);
+ cgs_write_register(acp_prv->cgs_device, mmACP_PGFSM_RETAIN_REG,
+ val);
+ cgs_write_register(acp_prv->cgs_device, mmACP_PGFSM_CONFIG_REG,
+ 0x500 + tile);
+
+ timeout = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (timeout--) {
+ val = cgs_read_register(acp_prv->cgs_device,
+ mmACP_PGFSM_READ_REG_0 + tile);
+ val = val & ACP_TILE_ON_MASK;
+ if (val == ACP_TILE_OFF_MASK)
+ break;
+ }
+
+ val = cgs_read_register(acp_prv->cgs_device,
+ mmACP_PGFSM_RETAIN_REG);
+
+ val |= ACP_TILE_OFF_RETAIN_REG_MASK;
+ cgs_write_register(acp_prv->cgs_device, mmACP_PGFSM_RETAIN_REG,
+ val);
+ }
+
+}
+
+/* power on a tile/block within ACP */
+static void acp_resume_tile(struct amd_acp_private *acp_prv, int tile)
+{
+ u32 val = 0;
+ u32 timeout = 0;
+
+ if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+ pr_err(" %s : Invalid ACP power tile index\n", __func__);
+ return;
+ }
+
+ val = cgs_read_register(acp_prv->cgs_device,
+ mmACP_PGFSM_READ_REG_0 + tile);
+ val = val & ACP_TILE_ON_MASK;
+
+ if (val != 0x0) {
+ cgs_write_register(acp_prv->cgs_device, mmACP_PGFSM_CONFIG_REG,
+ 0x600 + tile);
+ timeout = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (timeout--) {
+ val = cgs_read_register(acp_prv->cgs_device,
+ mmACP_PGFSM_READ_REG_0 + tile);
+ val = val & ACP_TILE_ON_MASK;
+ if (val == 0x0)
+ break;
+ }
+ val = cgs_read_register(acp_prv->cgs_device,
+ mmACP_PGFSM_RETAIN_REG);
+ if (tile == ACP_TILE_P1)
+ val = val & (ACP_TILE_P1_MASK);
+ else if (tile == ACP_TILE_P2)
+ val = val & (ACP_TILE_P2_MASK);
+
+ cgs_write_register(acp_prv->cgs_device, mmACP_PGFSM_RETAIN_REG,
+ val);
+ }
+}
+
/* Initialize and bring ACP hardware to default state. */
-static int acp_hw_init(struct amd_acp_device *acp_dev, void *iprv)
+static void acp_init(struct amd_acp_private *acp_prv)
{
u32 val;
u32 timeout_value;
- int acp_hw_init_status = STATUS_UNSUCCESSFUL;
- struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
-
- if (acp_dev == NULL)
- return acp_hw_init_status;
/* Assert Soft reset of ACP */
val = cgs_read_register(acp_prv->cgs_device, mmACP_SOFT_RESET);
@@ -728,7 +794,7 @@ static int acp_hw_init(struct amd_acp_device *acp_dev, void *iprv)
break;
}
- /* Enabling clock to ACP and waits until the clock is enabled */
+ /* Enable clock to ACP and wait until the clock is enabled */
val = cgs_read_register(acp_prv->cgs_device, mmACP_CONTROL);
val = val | ACP_CONTROL__ClkEn_MASK;
cgs_write_register(acp_prv->cgs_device, mmACP_CONTROL, val);
@@ -757,7 +823,6 @@ static int acp_hw_init(struct amd_acp_device *acp_dev, void *iprv)
acp_dma_descr_init(acp_prv);
- /* DMA DSCR BASE ADDRESS IN SRAM */
cgs_write_register(acp_prv->cgs_device, mmACP_DMA_DESC_BASE_ADDR,
ACP_SRAM_BASE_ADDRESS);
@@ -768,20 +833,34 @@ static int acp_hw_init(struct amd_acp_device *acp_dev, void *iprv)
cgs_write_register(acp_prv->cgs_device, mmACP_EXTERNAL_INTR_CNTL,
ACP_EXTERNAL_INTR_CNTL__DMAIOCMask_MASK);
+ pr_info("ACP: Initialized.\n");
+
+}
+
+static int acp_hw_init(struct amd_acp_device *acp_dev, void *iprv)
+{
+ struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+ acp_init(acp_prv);
+
cgs_add_irq_source(acp_prv->cgs_device, VISLANDS30_IV_SRCID_ACP, 1,
- irq_set_source, irq_handler, iprv);
+ irq_set_source, irq_handler, iprv);
+
+ /* Disable DSPs which are not used */
+ acp_suspend_tile(acp_prv, ACP_TILE_DSP0);
+ acp_suspend_tile(acp_prv, ACP_TILE_DSP1);
+ acp_suspend_tile(acp_prv, ACP_TILE_DSP2);
- pr_info("ACP: Initialized.\n");
return STATUS_SUCCESS;
}
-static void acp_hw_deinit(struct amd_acp_device *acp_dev)
+/* Deinitialize ACP */
+static void acp_deinit(struct amd_acp_private *acp_prv)
{
u32 val;
u32 timeout_value;
- struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
- /* Assert Soft reset of ACP */
+ /* Assert Soft reset of ACP */
val = cgs_read_register(acp_prv->cgs_device, mmACP_SOFT_RESET);
val |= ACP_SOFT_RESET__SoftResetAud_MASK;
@@ -795,7 +874,7 @@ static void acp_hw_deinit(struct amd_acp_device *acp_dev)
break;
}
}
- /** Disable ACP clock */
+ /** Disable ACP clock */
val = cgs_read_register(acp_prv->cgs_device, mmACP_CONTROL);
val &= ~ACP_CONTROL__ClkEn_MASK;
cgs_write_register(acp_prv->cgs_device, mmACP_CONTROL, val);
@@ -812,9 +891,15 @@ static void acp_hw_deinit(struct amd_acp_device *acp_dev)
pr_info("ACP: De-Initialized.\n");
}
+static void acp_hw_deinit(struct amd_acp_device *acp_dev)
+{
+ struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+ acp_deinit(acp_prv);
+}
-/* Get the number of bytes consumed for SRAM_TO_I2S DMA
- * channel during rendering
+/* Update the DMA position in the audio ring buffer at period-level
+ * granularity. This will be used by the ALSA PCM driver
*/
static u32 acp_update_dma_pointer(struct amd_acp_device *acp_dev, int direction,
u32 period_size)
@@ -824,6 +909,7 @@ static u32 acp_update_dma_pointer(struct amd_acp_device *acp_dev, int direction,
u32 mul;
u32 dma_config;
struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
pos = 0;
if (direction == STREAM_PLAYBACK) {
@@ -850,8 +936,8 @@ static u32 acp_update_dma_pointer(struct amd_acp_device *acp_dev, int direction,
return pos;
}
-/* Wait for complete buffering to complete in HOST
- * to SRAM DMA channel
+/* Wait for initial buffering to complete in the HOST to SRAM DMA channel
+ * for the playback use case
*/
static void wait_for_prebuffer_finish(struct amd_acp_device *acp_dev)
{
@@ -927,6 +1013,22 @@ static void configure_i2s(struct amd_acp_device *acp_dev,
configure_i2s_stream(acp_dev, i2s_config);
}
+void amd_acp_pcm_suspend(struct amd_acp_device *acp_dev)
+{
+ struct amd_acp_private *acp_prv;
+
+ acp_prv = (struct amd_acp_private *)acp_dev;
+ amd_acp_suspend(acp_prv);
+}
+
+void amd_acp_pcm_resume(struct amd_acp_device *acp_dev)
+{
+ struct amd_acp_private *acp_prv;
+
+ acp_prv = (struct amd_acp_private *)acp_dev;
+ amd_acp_resume(acp_prv);
+}
+
int amd_acp_hw_init(void *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor,
struct amd_acp_private **acp_private)
@@ -962,6 +1064,9 @@ int amd_acp_hw_init(void *cgs_device,
(*acp_private)->public.i2s_start = i2s_start;
(*acp_private)->public.i2s_stop = i2s_stop;
+ (*acp_private)->public.acp_suspend = amd_acp_pcm_suspend;
+ (*acp_private)->public.acp_resume = amd_acp_pcm_resume;
+
return 0;
}
@@ -973,10 +1078,19 @@ int amd_acp_hw_fini(struct amd_acp_private *acp_private)
void amd_acp_suspend(struct amd_acp_private *acp_private)
{
- /* TODO */
+ acp_suspend_tile(acp_private, ACP_TILE_P2);
+ acp_suspend_tile(acp_private, ACP_TILE_P1);
}
void amd_acp_resume(struct amd_acp_private *acp_private)
{
- /* TODO */
+ acp_resume_tile(acp_private, ACP_TILE_P1);
+ acp_resume_tile(acp_private, ACP_TILE_P2);
+
+ acp_init(acp_private);
+
+ /* Disable DSPs which are not going to be used */
+ acp_suspend_tile(acp_private, ACP_TILE_DSP0);
+ acp_suspend_tile(acp_private, ACP_TILE_DSP1);
+ acp_suspend_tile(acp_private, ACP_TILE_DSP2);
}
diff --git a/drivers/gpu/drm/amd/acp/acp_hw.h b/drivers/gpu/drm/amd/acp/acp_hw.h
index 5e7e225..4aa6b1c 100644
--- a/drivers/gpu/drm/amd/acp/acp_hw.h
+++ b/drivers/gpu/drm/amd/acp/acp_hw.h
@@ -40,6 +40,25 @@
#define FROM_ACP_I2S_1 0xa
#define FROM_ACP_I2S_2 0xb
+#define ACP_TILE_ON_MASK 0x03
+#define ACP_TILE_OFF_MASK 0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
+
+#define ACP_TILE_P1_MASK 0x3e
+#define ACP_TILE_P2_MASK 0x3d
+#define ACP_TILE_DSP0_MASK 0x3b
+#define ACP_TILE_DSP1_MASK 0x37
+#define ACP_TILE_DSP2_MASK 0x2f
+
+enum {
+ ACP_TILE_P1 = 0,
+ ACP_TILE_P2,
+ ACP_TILE_DSP0,
+ ACP_TILE_DSP1,
+ ACP_TILE_DSP2,
+};
+
enum {
STREAM_PLAYBACK = 0,
STREAM_CAPTURE,
diff --git a/drivers/gpu/drm/amd/acp/include/amd_acp.h b/drivers/gpu/drm/amd/acp/include/amd_acp.h
index 988d72a..8c1d31a 100644
--- a/drivers/gpu/drm/amd/acp/include/amd_acp.h
+++ b/drivers/gpu/drm/amd/acp/include/amd_acp.h
@@ -189,8 +189,26 @@ struct amd_acp_device {
*/
void (*i2s_stop)(struct amd_acp_device *acp_dev, int direction);
+ /**
+ * acp_suspend() - Power off ACP
+ * @acp_dev: acp device
+ *
+ * Switch off power tiles of ACP
+ */
+ void (*acp_suspend)(struct amd_acp_device *acp_dev);
+
+ /**
+ * acp_resume() - Power on ACP
+ * @acp_dev: acp device
+ *
+ * Switch on power tiles of ACP
+ */
+
+ void (*acp_resume)(struct amd_acp_device *acp_dev);
+
/* TODO: Need callback registration interface for asynchronous
* notifications */
+
};
#endif /* _AMD_ACP_H */
--
1.8.3.1