[Openchrome-devel] drm-openchrome: 2 commits - drivers/gpu/drm
James Simmons
jsimmons at kemper.freedesktop.org
Sat Feb 9 10:00:29 PST 2013
drivers/gpu/drm/via/Makefile | 4
drivers/gpu/drm/via/via_dma.h | 157 ++++++++++++++++++++
drivers/gpu/drm/via/via_dmabuffer.h | 158 --------------------
drivers/gpu/drm/via/via_drv.h | 3
drivers/gpu/drm/via/via_fence.c | 282 ++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/via/via_fence.h | 99 ++++++++++++
drivers/gpu/drm/via/via_h1_cmdbuf.c | 10 -
drivers/gpu/drm/via/via_h1_dma.c | 177 ++++++++++------------
drivers/gpu/drm/via/via_ttm.c | 4
9 files changed, 627 insertions(+), 267 deletions(-)
New commits:
commit aa74d1a0f99acb1f0f376aef2ae8d2826c9189ec
Author: James Simmons <jsimmons at infradead.org>
Date: Sat Feb 9 13:00:17 2013 -0500
The start of basic fence handling. We have via_fence which is the object used to act as a barrier. Different classes of fences are handled by the hardware in very similar ways. For these cases each of these fences will be mapped to the same via_fence_pool. The via_fence_pool handles the shared functionality that different types of fences have in common. Each pool itself supports a different number of types of fences, so to handle a specific type the via_fence_engine was created. An easy example of this is the DMA blit engine. We have a fence_pool to represent blits in general, and for that fence pool we have 4 engines where each one represents a DMA channel that can be used. The reason a fence is mapped to a pool instead of an engine is that this design allows a fence to map to different engines at the same time. Also note the way we handle sequencing is very different from other drivers. Most drivers handle ordering of the commands in a pipeline fashion, whereas our driver does it in
a more parallel fashion. With this approach the sequence number does not have to be sequential. Also we don't have to worry about overflow of the next sequence number. The bonus is that hashtables also perform better than list iterations as the number of items increases.
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
index 2912e41..136014b 100644
--- a/drivers/gpu/drm/via/Makefile
+++ b/drivers/gpu/drm/via/Makefile
@@ -4,9 +4,9 @@
ccflags-y := -Iinclude/drm
via-y := via_drv.o via_pm.o via_i2c.o via_irq.o via_verifier.o via_ioc32.o \
- init_ttm.o ttm_gem.o via_ttm.o via_fb.o via_sgdma.o \
+ init_ttm.o ttm_gem.o via_ttm.o via_fence.o via_sgdma.o \
via_h1_dma.o via_h1_cmdbuf.o via_video.o \
- via_display.o via_crtc.o crtc_hw.o via_clocks.o \
+ via_display.o via_crtc.o via_fb.o crtc_hw.o via_clocks.o \
via_analog.o via_lvds.o
obj-$(CONFIG_DRM_VIA) += via.o
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index f5e50ac..2efb1c6 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -46,6 +46,7 @@
#include <drm/via_drm.h>
#include "via_regs.h"
+#include "via_fence.h"
#include "via_dma.h"
#include "via_verifier.h"
#include "via_display.h"
diff --git a/drivers/gpu/drm/via/via_fence.c b/drivers/gpu/drm/via/via_fence.c
new file mode 100644
index 0000000..4b84885
--- /dev/null
+++ b/drivers/gpu/drm/via/via_fence.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2013 James Simmons <jsimmons at infradead.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) OR COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/random.h>
+#include "drmP.h"
+
+#include "via_drv.h"
+
+static void
+via_fence_destroy(struct kref *kref)
+{
+ struct via_fence *fence = container_of(kref, struct via_fence, kref);
+
+ if (fence->pool->fence_cleanup)
+ fence->pool->fence_cleanup(fence);
+ kfree(fence);
+}
+
+struct via_fence *
+via_fence_create_and_emit(struct via_fence_pool *pool, void *data,
+ unsigned int engine)
+{
+ struct via_fence *fence = NULL;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence) {
+ struct drm_hash_item *hash;
+ int ret = -EINVAL;
+
+ fence->timeout = jiffies + 3 * HZ;
+ fence->engine = engine;
+ fence->pool = pool;
+ fence->priv = data;
+ kref_init(&fence->kref);
+
+ if (engine >= pool->num_engines) {
+ via_fence_unref((void **) &fence);
+ return ERR_PTR(-ENXIO);
+ }
+ spin_lock(&pool->lock);
+try_again:
+ /* I like to use get_random_init but it is not exported :-( */
+ get_random_bytes(&fence->seq.key, 3);
+ /* For the small change you get a zero */
+ if (!fence->seq.key)
+ goto try_again;
+
+ if (!drm_ht_find_item(&pool->pending, fence->seq.key, &hash))
+ goto try_again;
+
+ if (!drm_ht_insert_item(&pool->pending, &fence->seq))
+ ret = pool->fence_emit(fence);
+ spin_unlock(&pool->lock);
+
+ if (ret) {
+ drm_ht_remove_item(&pool->pending, &fence->seq);
+ via_fence_unref((void **) &fence);
+ fence = ERR_PTR(ret);
+ }
+ }
+ return fence;
+}
+
+static void
+via_fence_work(struct work_struct *work)
+{
+ struct via_fence_engine *eng = container_of(work, struct via_fence_engine,
+ fence_work);
+ uint32_t seq = readl(eng->read_seq);
+ struct drm_hash_item *hash;
+
+ spin_lock(&eng->pool->lock);
+ if (!drm_ht_find_item(&eng->pool->pending, seq, &hash)) {
+ drm_ht_remove_item(&eng->pool->pending, hash);
+ if (eng->pool->fence_signaled) {
+ struct via_fence *fence;
+
+ fence = drm_hash_entry(hash, struct via_fence, seq);
+ if (eng->pool->fence_signaled)
+ eng->pool->fence_signaled(fence);
+ }
+ }
+ spin_unlock(&eng->pool->lock);
+}
+
+static bool
+via_fence_seq_signaled(struct via_fence *fence, u64 seq)
+{
+ struct drm_hash_item *key;
+ bool ret = false;
+
+ /* Still waiting to be processed */
+ spin_lock(&fence->pool->lock);
+ if (!drm_ht_find_item(&fence->pool->pending, seq, &key))
+ ret = true;
+ spin_unlock(&fence->pool->lock);
+ return ret;
+}
+
+/* TTM fence methods */
+bool
+via_fence_signaled(void *sync_obj)
+{
+ struct via_fence *fence = sync_obj;
+
+ if (!fence || !fence->seq.key)
+ return true;
+
+ if (via_fence_seq_signaled(fence, fence->seq.key)) {
+ fence->seq.key = 0;
+ return true;
+ }
+ return false;
+}
+
+int
+via_fence_wait(void *sync_obj, bool lazy, bool interruptible)
+{
+ struct via_fence *fence = sync_obj;
+ int ret = 0;
+
+ while (!via_fence_seq_signaled(fence, fence->seq.key)) {
+ if (time_after(jiffies, fence->timeout)) {
+ DRM_INFO("The fence wait timeout timeout = %lu, jiffies = %lu.\n",
+ fence->timeout, jiffies);
+ ret = -EBUSY;
+ break;
+ }
+
+ set_current_state(interruptible ? TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ return ret;
+}
+
+int
+via_fence_flush(void *sync_obj)
+{
+ return 0;
+}
+
+void
+via_fence_unref(void **sync_obj)
+{
+ struct via_fence *fence = *sync_obj;
+
+ *sync_obj = NULL;
+ if (fence)
+ kref_put(&fence->kref, &via_fence_destroy);
+}
+
+void *
+via_fence_ref(void *sync_obj)
+{
+ struct via_fence *fence = sync_obj;
+
+ kref_get(&fence->kref);
+ return sync_obj;
+}
+
+/* We assert 30 * sizeof(uint32_t) is enough for emit fence sequence */
+#define FENCE_CMD_BUFFER (256 * sizeof(uint32_t))
+
+int
+via_fence_pool_init(struct via_fence_pool *pool, struct drm_device *dev,
+ char *name, int num_engines, struct dma_pool *ctx)
+{
+ int size = sizeof(num_engines * sizeof(struct via_fence_engine *));
+ struct drm_via_private *dev_priv = dev->dev_private;
+ struct via_fence_engine *eng;
+ int ret = 0, i;
+ void *par;
+
+ i = sizeof(num_engines * sizeof(struct via_fence_engine));
+ par = kzalloc(size + i, GFP_KERNEL);
+ if (!par)
+ return -ENOMEM;
+
+ pool->engines = par;
+ eng = par + size;
+
+ /* allocate fence sync bo */
+ ret = ttm_allocate_kernel_buffer(&dev_priv->bdev, PAGE_SIZE, 16,
+ TTM_PL_FLAG_VRAM, &pool->fence_sync);
+ if (unlikely(ret)) {
+ DRM_ERROR("allocate fence sync bo error.\n");
+ goto out_err;
+ }
+
+ pool->cmd_buffer = kzalloc(FENCE_CMD_BUFFER, GFP_KERNEL);
+ if (!pool->cmd_buffer)
+ goto out_err;
+
+ spin_lock_init(&pool->lock);
+ pool->num_engines = num_engines;
+ pool->dev = dev;
+
+ if (!ctx) {
+ struct page *page = pool->fence_sync.bo->ttm->pages[0];
+
+ pool->bus_addr = dma_map_page(dev->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ for (i = 0; i < pool->num_engines; i++) {
+ eng->write_seq = pool->fence_sync.virtual + VIA_FENCE_SIZE * i;
+ if (!ctx) {
+ eng->fence_phy_addr = pool->bus_addr + VIA_FENCE_SIZE * i;
+ eng->read_seq = eng->write_seq;
+ } else {
+ eng->read_seq = dma_pool_alloc(ctx, GFP_KERNEL,
+ &eng->fence_phy_addr);
+ }
+
+ INIT_WORK(&eng->fence_work, via_fence_work);
+ eng->fence_wq = create_singlethread_workqueue(name);
+ eng->pool = pool;
+ pool->engines[i] = eng;
+ eng += sizeof(struct via_fence_engine);
+ }
+ ret = drm_ht_create(&pool->pending, 12);
+out_err:
+ if (ret)
+ via_fence_pool_fini(pool);
+ return ret;
+}
+
+void
+via_fence_pool_fini(struct via_fence_pool *pool)
+{
+ struct ttm_buffer_object *sync_bo;
+ struct via_fence_engine *eng;
+ int i;
+
+ drm_ht_remove(&pool->pending);
+
+ kfree(pool->cmd_buffer);
+
+ for (i = 0; i < pool->num_engines; i++) {
+ eng = pool->engines[i];
+
+ destroy_workqueue(eng->fence_wq);
+ }
+ kfree(pool->engines);
+
+ sync_bo = pool->fence_sync.bo;
+ if (sync_bo) {
+ ttm_bo_unpin(sync_bo, &pool->fence_sync);
+ ttm_bo_unref(&sync_bo);
+ }
+
+ if (pool->bus_addr)
+ dma_unmap_page(pool->dev->dev, pool->bus_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ kfree(pool);
+}
diff --git a/drivers/gpu/drm/via/via_fence.h b/drivers/gpu/drm/via/via_fence.h
new file mode 100644
index 0000000..ed32626
--- /dev/null
+++ b/drivers/gpu/drm/via/via_fence.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2013 James Simmons <jsimmons at infradead.org>
+ *
+ * Influenced by sample code from VIA Technologies and the radeon driver.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) OR COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _VIA_FENCE_H_
+#define _VIA_FENCE_H_
+
+#define VIA_FENCE_SIZE 32
+
+struct via_fence;
+
+struct via_fence_engine {
+ struct via_fence_pool *pool;
+
+ /* BUS address used for fencing */
+ dma_addr_t fence_phy_addr;
+
+ /* virtual address for setting seq value */
+ void *write_seq;
+ /* virtual address for getting seq value */
+ void *read_seq;
+
+ struct workqueue_struct *fence_wq;
+ struct work_struct fence_work;
+};
+
+struct via_fence_pool {
+ struct ttm_bo_kmap_obj fence_sync;
+ /* BUS address used for fencing */
+ dma_addr_t bus_addr;
+
+ /* for access synchronization */
+ spinlock_t lock;
+
+ /* Fence command bounce buffer */
+ uint32_t *cmd_buffer;
+
+ struct drm_open_hash pending;
+ struct drm_device *dev;
+
+ struct via_fence_engine **engines;
+ unsigned int num_engines;
+
+ void (*fence_signaled)(struct via_fence *fence);
+ void (*fence_cleanup)(struct via_fence *fence);
+ int (*fence_emit)(struct via_fence *fence);
+};
+
+struct via_fence {
+ /* Which fence pool (DMA or other), this fence is associated with */
+ struct via_fence_pool *pool;
+ /* the sequence number that the fence object emit,
+ * stored in a hash key */
+ struct drm_hash_item seq;
+ /* the time to wait for the fence object signal */
+ unsigned long timeout;
+ /* Which engine this belongs too */
+ int engine;
+ /* the reference information of this fence object */
+ struct kref kref;
+ /* place holder for special data specific to fence type */
+ void *priv;
+};
+
+extern bool via_fence_signaled(void *sync_obj);
+extern int via_fence_wait(void *sync_obj, bool lazy, bool interruptible);
+extern int via_fence_flush(void *sync_obj);
+extern void via_fence_unref(void **sync_obj);
+extern void *via_fence_ref(void *sync_obj);
+
+extern struct via_fence *
+via_fence_create_and_emit(struct via_fence_pool *pool, void *data,
+ unsigned int engine);
+extern int
+via_fence_pool_init(struct via_fence_pool *pool, struct drm_device *dev,
+ char *name, int num_engines, struct dma_pool *ctx);
+extern void via_fence_pool_fini(struct via_fence_pool *pool);
+
+#endif
diff --git a/drivers/gpu/drm/via/via_ttm.c b/drivers/gpu/drm/via/via_ttm.c
index fd6ff17..0c19bfc 100644
--- a/drivers/gpu/drm/via/via_ttm.c
+++ b/drivers/gpu/drm/via/via_ttm.c
@@ -412,11 +412,11 @@ static struct ttm_bo_driver via_bo_driver = {
.evict_flags = via_evict_flags,
.move = via_bo_move,
.verify_access = via_verify_access,
- /*.sync_obj_signaled = via_fence_signalled,
+ .sync_obj_signaled = via_fence_signaled,
.sync_obj_wait = via_fence_wait,
.sync_obj_flush = via_fence_flush,
.sync_obj_unref = via_fence_unref,
- .sync_obj_ref = via_fence_ref,*/
+ .sync_obj_ref = via_fence_ref,
.io_mem_reserve = via_ttm_io_mem_reserve,
.io_mem_free = via_ttm_io_mem_free,
};
commit e26a8da608fa8d4dd334ba1917fd0a93107ae11b
Author: James Simmons <jsimmons at infradead.org>
Date: Sat Jan 26 19:51:13 2013 -0500
via_dmabuffer.h really is for dma blitting and not the command engine. Make drm_via_sg_info a structure.
diff --git a/drivers/gpu/drm/via/via_dma.h b/drivers/gpu/drm/via/via_dma.h
new file mode 100644
index 0000000..cdfff36
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dma.h
@@ -0,0 +1,157 @@
+/* via_dma.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ *
+ * Copyright 2005 Thomas Hellstrom.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Hellstrom.
+ * Register info from Digeo Inc.
+ */
+
+#ifndef _VIA_DMA_H
+#define _VIA_DMA_H
+
+#include <linux/dma-mapping.h>
+
+#define VIA_NUM_BLIT_ENGINES 2
+#define VIA_NUM_BLIT_SLOTS 8
+
+#define VIA_OUT_RING_H1(nReg, nData) { \
+ *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
+ *((uint32_t *)(vb) + 1) = (nData); \
+ vb = ((uint32_t *)vb) + 2; \
+ dev_priv->dma_low += 8; \
+}
+
+/* For H5/6 2D cmd load(Two value :cmd with address)***/
+#define VIA_OUT_RING_QW(w1, w2) do { \
+ *vb++ = (w1); \
+ *vb++ = (w2); \
+ dev_priv->dma_low += 8; \
+} while (0)
+
+struct via_h1_header {
+ uint32_t mem_addr;
+ uint32_t dev_addr;
+ uint32_t size;
+ uint32_t next;
+};
+
+struct drm_via_sg_info {
+ struct page **pages;
+ unsigned long num_pages;
+ struct via_h1_header **desc_pages;
+ int num_desc_pages;
+ int num_desc;
+ enum dma_data_direction direction;
+ unsigned char *bounce_buffer;
+ dma_addr_t chain_start;
+ uint32_t free_on_sequence;
+ unsigned int descriptors_per_page;
+ int aborted;
+ enum {
+ dr_via_device_mapped,
+ dr_via_desc_pages_alloc,
+ dr_via_pages_locked,
+ dr_via_pages_alloc,
+ dr_via_sg_init
+ } state;
+};
+
+typedef struct _drm_via_blitq {
+ struct drm_device *dev;
+ uint32_t cur_blit_handle;
+ uint32_t done_blit_handle;
+ unsigned serviced;
+ unsigned head;
+ unsigned cur;
+ unsigned num_free;
+ unsigned num_outstanding;
+ unsigned long end;
+ int aborting;
+ int is_active;
+ struct drm_via_sg_info *blits[VIA_NUM_BLIT_SLOTS];
+ spinlock_t blit_lock;
+ wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
+ wait_queue_head_t busy_queue;
+ struct work_struct wq;
+ struct timer_list poll_timer;
+} drm_via_blitq_t;
+
+
+/*
+ * PCI DMA Registers
+ * Channels 2 & 3 don't seem to be implemented in hardware.
+ */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
+/* DPR */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
+#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
+
+/* MR */
+#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
+#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
+#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
+
+/* CSR */
+#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
+#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
+#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
+#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
+#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+
+#endif
diff --git a/drivers/gpu/drm/via/via_dmabuffer.h b/drivers/gpu/drm/via/via_dmabuffer.h
deleted file mode 100644
index 47b36af..0000000
--- a/drivers/gpu/drm/via/via_dmabuffer.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* via_dmabuffer.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
- * Copyright 2005 Thomas Hellstrom.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Hellstrom.
- * Register info from Digeo Inc.
- */
-
-#ifndef _VIA_DMABLIT_H
-#define _VIA_DMABLIT_H
-
-#include <linux/dma-mapping.h>
-
-#define VIA_NUM_BLIT_ENGINES 2
-#define VIA_NUM_BLIT_SLOTS 8
-
-#define VIA_OUT_RING_H1(nReg, nData) { \
- *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
- *((uint32_t *)(vb) + 1) = (nData); \
- vb = ((uint32_t *)vb) + 2; \
- dev_priv->dma_low += 8; \
-}
-
-/* For H5/6 2D cmd load(Two value :cmd with address)***/
-#define VIA_OUT_RING_QW(w1, w2) do { \
- *vb++ = (w1); \
- *vb++ = (w2); \
- dev_priv->dma_low += 8; \
-} while (0)
-
-struct via_h1_header {
- uint32_t mem_addr;
- uint32_t dev_addr;
- uint32_t size;
- uint32_t next;
-};
-
-typedef struct _drm_via_sg_info {
- struct sg_table *table;
- struct page **pages;
- unsigned long num_pages;
- struct via_h1_header **desc_pages;
- int num_desc_pages;
- int num_desc;
- enum dma_data_direction direction;
- unsigned char *bounce_buffer;
- dma_addr_t chain_start;
- uint32_t free_on_sequence;
- unsigned int descriptors_per_page;
- int aborted;
- enum {
- dr_via_device_mapped,
- dr_via_desc_pages_alloc,
- dr_via_pages_locked,
- dr_via_pages_alloc,
- dr_via_sg_init
- } state;
-} drm_via_sg_info_t;
-
-typedef struct _drm_via_blitq {
- struct drm_device *dev;
- uint32_t cur_blit_handle;
- uint32_t done_blit_handle;
- unsigned serviced;
- unsigned head;
- unsigned cur;
- unsigned num_free;
- unsigned num_outstanding;
- unsigned long end;
- int aborting;
- int is_active;
- drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
- spinlock_t blit_lock;
- wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
- wait_queue_head_t busy_queue;
- struct work_struct wq;
- struct timer_list poll_timer;
-} drm_via_blitq_t;
-
-
-/*
- * PCI DMA Registers
- * Channels 2 & 3 don't seem to be implemented in hardware.
- */
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
-/* DPR */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
-#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
-
-/* MR */
-#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
-#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
-#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
-
-/* CSR */
-#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
-#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
-#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
-#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
-#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-
-#endif
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 88ee4d2..f5e50ac 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -46,8 +46,8 @@
#include <drm/via_drm.h>
#include "via_regs.h"
+#include "via_dma.h"
#include "via_verifier.h"
-#include "via_dmabuffer.h"
#include "via_display.h"
#define VIA_MM_ALIGN_SIZE 16
diff --git a/drivers/gpu/drm/via/via_h1_cmdbuf.c b/drivers/gpu/drm/via/via_h1_cmdbuf.c
index a5bb304..c235e2a 100644
--- a/drivers/gpu/drm/via/via_h1_cmdbuf.c
+++ b/drivers/gpu/drm/via/via_h1_cmdbuf.c
@@ -41,8 +41,6 @@
#define CMDBUF_ALIGNMENT_SIZE (0x100)
#define CMDBUF_ALIGNMENT_MASK (0x0ff)
-#define via_flush_write_combine() DRM_MEMORYBARRIER()
-
static void via_cmdbuf_start(struct drm_via_private *dev_priv);
static void via_cmdbuf_pause(struct drm_via_private *dev_priv);
static void via_cmdbuf_reset(struct drm_via_private *dev_priv);
@@ -364,11 +362,11 @@ static int via_hook_segment(struct drm_via_private *dev_priv,
uint32_t diff;
paused = 0;
- via_flush_write_combine();
+ mb();
(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
*paused_at = pause_addr_lo;
- via_flush_write_combine();
+ mb();
(void) *paused_at;
reader = ioread32(dev_priv->hw_addr_ptr);
@@ -491,7 +489,7 @@ static void via_cmdbuf_start(struct drm_via_private *dev_priv)
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
&pause_addr_hi, &pause_addr_lo, 1) - 1;
- via_flush_write_combine();
+ mb();
(void) *(volatile uint32_t *) dev_priv->last_pause_ptr;
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
@@ -501,7 +499,7 @@ static void via_cmdbuf_start(struct drm_via_private *dev_priv)
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
- DRM_WRITEMEMORYBARRIER();
+ wmb();
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
VIA_READ(VIA_REG_TRANSPACE);
diff --git a/drivers/gpu/drm/via/via_h1_dma.c b/drivers/gpu/drm/via/via_h1_dma.c
index 8c5d926..9befd8a 100644
--- a/drivers/gpu/drm/via/via_h1_dma.c
+++ b/drivers/gpu/drm/via/via_h1_dma.c
@@ -25,31 +25,55 @@
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
+#include <drm/drmP.h>
+#include "via_drv.h"
+#include "via_dma.h"
+#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
+#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
+#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
+ * Fire a blit engine.
*/
+static void
+via_h1_fire_dmablit(struct drm_device *dev, struct drm_via_sg_info *vsg, int engine)
+{
+ struct drm_via_private *dev_priv = dev->dev_private;
-#include <drm/drmP.h>
-#include "via_drv.h"
-#include "via_dmabuffer.h"
+ VIA_WRITE(VIA_PCI_DMA_MAR0 + engine * 0x10, 0);
+ VIA_WRITE(VIA_PCI_DMA_DAR0 + engine * 0x10, 0);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_DMA_CSR_DE);
+ VIA_WRITE(VIA_PCI_DMA_MR0 + engine * 0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+ VIA_WRITE(VIA_PCI_DMA_BCR0 + engine * 0x10, 0);
+ VIA_WRITE(VIA_PCI_DMA_DPR0 + engine * 0x10, vsg->chain_start);
+ wmb();
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+ VIA_READ(VIA_PCI_DMA_CSR0 + engine * 0x04);
+}
+
+static void
+via_abort_dmablit(struct drm_device *dev, int engine)
+{
+ struct drm_via_private *dev_priv = dev->dev_private;
-#include <linux/pagemap.h>
-#include <linux/slab.h>
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_TA);
+}
-#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
-#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
-#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
+static void
+via_dmablit_engine_off(struct drm_device *dev, int engine)
+{
+ struct drm_via_private *dev_priv = dev->dev_private;
+
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+}
/*
* Unmap a DMA mapping.
*/
static void
-via_unmap_blit_from_device(struct device *dev, drm_via_sg_info_t *vsg)
+via_unmap_from_device(struct device *dev, struct drm_via_sg_info *vsg)
{
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
@@ -65,6 +89,7 @@ via_unmap_blit_from_device(struct device *dev, drm_via_sg_info_t *vsg)
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
+
dma_unmap_single(dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
dma_unmap_page(dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
next = (dma_addr_t) desc_ptr->next;
@@ -76,7 +101,7 @@ via_unmap_blit_from_device(struct device *dev, drm_via_sg_info_t *vsg)
* Count how many descriptors are needed.
*/
static void
-via_count_descriptors(const drm_via_dmablit_t *xfer, drm_via_sg_info_t *vsg)
+via_count_descriptors(const drm_via_dmablit_t *xfer, struct drm_via_sg_info *vsg)
{
unsigned char *mem_addr = xfer->mem_addr, *cur_mem;
uint32_t fb_addr = xfer->fb_addr, cur_fb;
@@ -105,13 +130,13 @@ via_count_descriptors(const drm_via_dmablit_t *xfer, drm_via_sg_info_t *vsg)
}
/*
- * Map the DMA pages for the device, put together and map also the descriptors. Descriptors
+ * Map the DMA pages for the device, put together and map also the descriptors. Descriptors
* are run in reverse order by the hardware because we are not allowed to update the
* 'next' field without syncing calls when the descriptor is already mapped.
*/
static void
-via_map_blit_for_device(struct device *dev, const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg)
+via_map_for_device(struct device *dev, const drm_via_dmablit_t *xfer,
+ struct drm_via_sg_info *vsg)
{
unsigned num_descriptors_this_page = 0, cur_descriptor_page = 0;
unsigned char *mem_addr = xfer->mem_addr, *cur_mem;
@@ -162,19 +187,46 @@ via_map_blit_for_device(struct device *dev, const drm_via_dmablit_t *xfer,
}
/*
+ * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
+ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
+ * quite large for some blits, and pages don't need to be contiguous.
+ */
+static int
+via_alloc_desc_pages(struct drm_via_sg_info *vsg)
+{
+ int i;
+
+ vsg->descriptors_per_page = PAGE_SIZE / sizeof(struct via_h1_header);
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->descriptors_per_page;
+
+ if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
+ return -ENOMEM;
+
+ vsg->state = dr_via_desc_pages_alloc;
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
+ if (NULL == (vsg->desc_pages[i] = (struct via_h1_header *) __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+ }
+ DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
+ vsg->num_desc);
+ return 0;
+}
+
+/*
* Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void
-via_free_sg_info(struct device *dev, drm_via_sg_info_t *vsg)
+via_free_sg_info(struct device *dev, struct drm_via_sg_info *vsg)
{
struct page *page;
int i;
switch (vsg->state) {
case dr_via_device_mapped:
- via_unmap_blit_from_device(dev, vsg);
+ via_unmap_from_device(dev, vsg);
case dr_via_desc_pages_alloc:
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
@@ -200,31 +252,11 @@ via_free_sg_info(struct device *dev, drm_via_sg_info_t *vsg)
}
/*
- * Fire a blit engine.
- */
-static void
-via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
-{
- struct drm_via_private *dev_priv = dev->dev_private;
-
- VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
- VIA_DMA_CSR_DE);
- VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
- VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
- DRM_WRITEMEMORYBARRIER();
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
- VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
-}
-
-/*
* Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
* occur here if the calling user does not have access to the submitted address.
*/
static int
-via_lock_all_dma_pages(struct drm_device *dev, drm_via_sg_info_t *ttm, drm_via_dmablit_t *xfer)
+via_lock_all_dma_pages(struct drm_device *dev, struct drm_via_sg_info *ttm, drm_via_dmablit_t *xfer)
{
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
int write = xfer->to_fb, ret;
@@ -259,49 +291,6 @@ via_lock_all_dma_pages(struct drm_device *dev, drm_via_sg_info_t *ttm, drm_via_
}
/*
- * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
- * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
- * quite large for some blits, and pages don't need to be contingous.
- */
-static int
-via_alloc_desc_pages(drm_via_sg_info_t *vsg)
-{
- int i;
-
- vsg->descriptors_per_page = PAGE_SIZE / sizeof(struct via_h1_header);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
- vsg->descriptors_per_page;
-
- if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
- return -ENOMEM;
-
- vsg->state = dr_via_desc_pages_alloc;
- for (i = 0; i < vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] = (struct via_h1_header *) __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
- }
- DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
- vsg->num_desc);
- return 0;
-}
-
-static void
-via_abort_dmablit(struct drm_device *dev, int engine)
-{
- struct drm_via_private *dev_priv = dev->dev_private;
-
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
-}
-
-static void
-via_dmablit_engine_off(struct drm_device *dev, int engine)
-{
- struct drm_via_private *dev_priv = dev->dev_private;
-
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
-}
-
-/*
* The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
* The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
@@ -325,7 +314,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
done_transfer = blitq->is_active &&
- ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine * 0x04)) & VIA_DMA_CSR_TD);
done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
@@ -344,18 +333,16 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
* Clear transfer done flag.
*/
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_TD);
blitq->is_active = 0;
blitq->aborting = 0;
schedule_work(&blitq->wq);
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
-
/*
* Abort transfer after one second.
*/
-
via_abort_dmablit(dev, engine);
blitq->aborting = 1;
blitq->end = jiffies + DRM_HZ;
@@ -363,7 +350,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
if (!blitq->is_active) {
if (blitq->num_outstanding) {
- via_fire_dmablit(dev, blitq->blits[cur], engine);
+ via_h1_fire_dmablit(dev, blitq->blits[cur], engine);
blitq->is_active = 1;
blitq->cur = cur;
blitq->num_outstanding--;
@@ -398,7 +385,6 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
/*
* Allow for handle wraparounds.
*/
-
active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
((blitq->cur_blit_handle - handle) <= (1 << 23));
@@ -462,9 +448,7 @@ via_dmablit_timer(unsigned long data)
* Rerun handler to delete timer if engines are off, and
* to shorten abort latency. This is a little nasty.
*/
-
via_dmablit_handler(dev, engine, 0);
-
}
}
@@ -478,11 +462,10 @@ via_dmablit_workqueue(struct work_struct *work)
{
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
struct drm_device *dev = blitq->dev;
+ struct drm_via_sg_info *cur_sg;
unsigned long irqsave;
- drm_via_sg_info_t *cur_sg;
int cur_released;
-
DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
(blitq - ((struct drm_via_private *)dev->dev_private)->blit_queues));
@@ -551,7 +534,7 @@ via_init_dmablit(struct drm_device *dev)
* Build all info and do all mappings required for a blit.
*/
static int
-via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+via_build_sg_info(struct drm_device *dev, struct drm_via_sg_info *vsg, drm_via_dmablit_t *xfer)
{
int ret = 0;
@@ -637,7 +620,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
return ret;
}
- via_map_blit_for_device(dev->dev, xfer, vsg);
+ via_map_for_device(dev->dev, xfer, vsg);
return 0;
}
@@ -690,7 +673,7 @@ static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
struct drm_via_private *dev_priv = dev->dev_private;
- drm_via_sg_info_t *vsg;
+ struct drm_via_sg_info *vsg;
drm_via_blitq_t *blitq;
unsigned long irqsave;
int engine, ret;
@@ -705,8 +688,6 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
return ret;
- //ret = sg_alloc_table(sgtab, num_pages, GFP_KERNEL);
-
if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
via_dmablit_release_slot(blitq);
return -ENOMEM;
More information about the Openchrome-devel
mailing list