[PATCH 04/33] drm/omap: add dmm_read() and dmm_write() wrappers
Laurent Pinchart
laurent.pinchart at ideasonboard.com
Tue Feb 23 21:18:42 UTC 2016
Hi Tomi,
Thank you for the patch.
On Friday 19 February 2016 11:47:39 Tomi Valkeinen wrote:
> This patch adds dmm_read() and dmm_write(), wrapper functions for readl()
> and writel(), so that we can implement a workaround for errata i878.
>
> Signed-off-by: Tomi Valkeinen <tomi.valkeinen at ti.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart at ideasonboard.com>
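
One remark for readers following the series: with every register access
funnelled through these two helpers, the future i878 workaround only needs to
be hooked in one place rather than at each call site. A rough sketch of what
that could look like is below; dmm_errata_i878_fixup() is a made-up
placeholder for illustration, not something from this patch or the rest of
the series.

#include <linux/io.h>
#include <linux/types.h>

struct dmm {
	void __iomem *base;
	/* other fields elided */
};

/* Hypothetical hook: a real i878 workaround would replace this body. */
static inline void dmm_errata_i878_fixup(struct dmm *dmm)
{
	/* no-op until the actual workaround is implemented */
}

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	/* every caller picks up the workaround automatically */
	dmm_errata_i878_fixup(dmm);
	return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	dmm_errata_i878_fixup(dmm);
	writel(val, dmm->base + reg);
}
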
> ---
> drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 39 ++++++++++++++++++-----------
> 1 file changed, 24 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
> index 4e04f9487375..fe5260477b52 100644
> --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
> +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
> @@ -79,6 +79,16 @@ static const uint32_t reg[][4] = {
> DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
> };
>
> +static u32 dmm_read(struct dmm *dmm, u32 reg)
> +{
> + return readl(dmm->base + reg);
> +}
> +
> +static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
> +{
> + writel(val, dmm->base + reg);
> +}
> +
> /* simple allocator to grab next 16 byte aligned memory from txn */
> static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
> {
> @@ -108,7 +118,7 @@ static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
>
> i = DMM_FIXED_RETRY_COUNT;
> while (true) {
> - r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
> + r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
> err = r & DMM_PATSTATUS_ERR;
> if (err)
> return -EFAULT;
> @@ -140,11 +150,11 @@ static void release_engine(struct refill_engine *engine)
> static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
> {
> struct dmm *dmm = arg;
> - uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
> + uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
> int i;
>
> /* ack IRQ */
> - writel(status, dmm->base + DMM_PAT_IRQSTATUS);
> + dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
>
> for (i = 0; i < dmm->num_engines; i++) {
> if (status & DMM_IRQSTAT_LST) {
> @@ -275,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
> readl(&txn->last_pat->next_pa);
>
> /* write to PAT_DESCR to clear out any pending transaction */
> - writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
> + dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
>
> /* wait for engine ready: */
> ret = wait_status(engine, DMM_PATSTATUS_READY);
> @@ -291,8 +301,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
> smp_mb();
>
> /* kick reload */
> - writel(engine->refill_pa,
> - dmm->base + reg[PAT_DESCR][engine->id]);
> + dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
>
> if (wait) {
> if (!wait_for_completion_timeout(&engine->compl,
> @@ -659,7 +668,7 @@ static int omap_dmm_probe(struct platform_device *dev)
>
> omap_dmm->dev = &dev->dev;
>
> - hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
> + hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
> omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
> omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
> omap_dmm->container_width = 256;
> @@ -668,7 +677,7 @@ static int omap_dmm_probe(struct platform_device *dev)
> atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
>
> /* read out actual LUT width and height */
> - pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
> + pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
> omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
> omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
>
> @@ -678,12 +687,12 @@ static int omap_dmm_probe(struct platform_device *dev)
> omap_dmm->num_lut++;
>
> /* initialize DMM registers */
> - writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
> - writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
> - writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
> - writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
> - writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
> - writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
> + dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
> + dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
> + dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
> + dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
> + dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
> + dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
>
> ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
> "omap_dmm_irq_handler", omap_dmm);
> @@ -701,7 +710,7 @@ static int omap_dmm_probe(struct platform_device *dev)
> * buffers for accelerated pan/scroll) and FILL_DSC<n> which
> * we just generally don't care about.
> */
> - writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
> + dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
>
> omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
> if (!omap_dmm->dummy_page) {
--
Regards,
Laurent Pinchart