[openchrome-devel] drm-openchrome: Branch 'drm-next-4.21' - 2 commits - drivers/gpu/drm

Kevin Brace kevinbrace at kemper.freedesktop.org
Fri Jan 4 23:58:55 UTC 2019


 drivers/gpu/drm/openchrome/Makefile               |    6 
 drivers/gpu/drm/openchrome/openchrome_crtc.c      |   16 
 drivers/gpu/drm/openchrome/openchrome_dma.h       |  134 --
 drivers/gpu/drm/openchrome/openchrome_drv.c       |   77 -
 drivers/gpu/drm/openchrome/openchrome_drv.h       |   89 -
 drivers/gpu/drm/openchrome/openchrome_fence.c     |  282 -----
 drivers/gpu/drm/openchrome/openchrome_fence.h     |   97 -
 drivers/gpu/drm/openchrome/openchrome_h1_cmdbuf.c |  666 ------------
 drivers/gpu/drm/openchrome/openchrome_h1_dma.c    |  233 ----
 drivers/gpu/drm/openchrome/openchrome_ioc32.c     |   10 
 drivers/gpu/drm/openchrome/openchrome_irq.c       |  555 ----------
 drivers/gpu/drm/openchrome/openchrome_sgdma.c     |  117 --
 drivers/gpu/drm/openchrome/openchrome_ttm.c       |  310 -----
 drivers/gpu/drm/openchrome/openchrome_verifier.c  | 1139 ----------------------
 drivers/gpu/drm/openchrome/openchrome_verifier.h  |   62 -
 15 files changed, 3 insertions(+), 3790 deletions(-)

New commits:
commit 3fa71816c7f9c069a48d35604ae4223086bf3c75
Author: Kevin Brace <kevinbrace at gmx.com>
Date:   Fri Jan 4 15:56:33 2019 -0800

    Version bumped to 3.1.0
    
    Massive removal of unfinished acceleration code. The ABI break will
    necessitate incrementing the minor version number.
    
    Signed-off-by: Kevin Brace <kevinbrace at gmx.com>
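
User space can tell the old and new driver ABI apart from the version the
kernel reports. A minimal sketch using libdrm's drmGetVersion() follows; the
device node path is only an example, and the check itself is not part of the
patch:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <xf86drm.h>

	int main(void)
	{
		/* Example node path; use whichever node openchrome drives. */
		int fd = open("/dev/dri/card0", O_RDWR);
		drmVersionPtr ver;

		if (fd < 0)
			return 1;

		ver = drmGetVersion(fd);
		if (ver) {
			/* 3.1.0 and later no longer expose the old acceleration ABI. */
			printf("%s %d.%d.%d\n", ver->name, ver->version_major,
			       ver->version_minor, ver->version_patchlevel);
			drmFreeVersion(ver);
		}

		close(fd);
		return 0;
	}

Build against libdrm, e.g. with pkg-config --cflags --libs libdrm.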

diff --git a/drivers/gpu/drm/openchrome/openchrome_drv.h b/drivers/gpu/drm/openchrome/openchrome_drv.h
index adce322b9ceb..d57f735a0b83 100644
--- a/drivers/gpu/drm/openchrome/openchrome_drv.h
+++ b/drivers/gpu/drm/openchrome/openchrome_drv.h
@@ -31,11 +31,11 @@
 #define DRIVER_AUTHOR		"OpenChrome Project"
 #define DRIVER_NAME		"openchrome"
 #define DRIVER_DESC		"OpenChrome DRM for VIA Technologies Chrome IGP"
-#define DRIVER_DATE		"20181012"
+#define DRIVER_DATE		"20190104"
 
 #define DRIVER_MAJOR		3
-#define DRIVER_MINOR		0
-#define DRIVER_PATCHLEVEL	88
+#define DRIVER_MINOR		1
+#define DRIVER_PATCHLEVEL	0
 #include <linux/module.h>
 
 #include "ttm/ttm_bo_api.h"
commit 64947142a1cf8b83faa73da7aa35a17f6a24568a
Author: Kevin Brace <kevinbrace at gmx.com>
Date:   Fri Jan 4 15:49:06 2019 -0800

    Remove unfinished acceleration code
    
    This will break the ABI; hence, it will be necessary to increment at
    least the minor version number.
    
    Signed-off-by: Kevin Brace <kevinbrace at gmx.com>
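
With their DRM_IOCTL_DEF_DRV() entries gone from via_ioctls[], the dropped
ioctl numbers land on empty slots in the table and the DRM core rejects them
(typically with -EINVAL). A rough user-space probe, assuming the
DRM_IOCTL_VIA_FLUSH definition from the existing via_drm.h uapi header and an
example device node path:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <drm/via_drm.h>	/* uapi header; include path may differ with libdrm */

	int main(void)
	{
		/* Example node path; use whichever node openchrome drives. */
		int fd = open("/dev/dri/card0", O_RDWR);

		if (fd < 0)
			return 1;

		/*
		 * VIA_FLUSH is one of the removed acceleration ioctls. On a
		 * pre-3.1.0 driver it still exists (and may instead fail with
		 * -EACCES without DRM authentication).
		 */
		if (ioctl(fd, DRM_IOCTL_VIA_FLUSH) == -1 && errno == EINVAL)
			printf("legacy acceleration ABI is gone\n");

		close(fd);
		return 0;
	}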

diff --git a/drivers/gpu/drm/openchrome/Makefile b/drivers/gpu/drm/openchrome/Makefile
index c3f8ede797ec..0cccdbe7f5a4 100644
--- a/drivers/gpu/drm/openchrome/Makefile
+++ b/drivers/gpu/drm/openchrome/Makefile
@@ -11,21 +11,15 @@ openchrome-y := openchrome_analog.o \
 		openchrome_drv.o \
 		openchrome_encoder.o \
 		openchrome_fb.o \
-		openchrome_fence.o \
 		openchrome_fp.o \
 		openchrome_gem.o \
-		openchrome_h1_cmdbuf.o \
-		openchrome_h1_dma.o \
 		openchrome_hdmi.o \
 		openchrome_i2c.o \
 		openchrome_ioc32.o \
-		openchrome_irq.o \
 		openchrome_pm.o \
 		openchrome_sii164.o \
-		openchrome_sgdma.o \
 		openchrome_tmds.o \
 		openchrome_ttm.o \
-		openchrome_verifier.o \
 		openchrome_vt1632.o
 
 obj-$(CONFIG_DRM_OPENCHROME)	+= openchrome.o
diff --git a/drivers/gpu/drm/openchrome/openchrome_crtc.c b/drivers/gpu/drm/openchrome/openchrome_crtc.c
index 601a0ac11643..d2e15bb18a7e 100644
--- a/drivers/gpu/drm/openchrome/openchrome_crtc.c
+++ b/drivers/gpu/drm/openchrome/openchrome_crtc.c
@@ -539,8 +539,6 @@ static const struct drm_crtc_funcs via_iga1_funcs = {
 	.gamma_set = via_iga1_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = via_crtc_destroy,
-	.enable_vblank = via_enable_vblank,
-	.disable_vblank = via_disable_vblank,
 };
 
 static const struct drm_crtc_funcs via_iga2_funcs = {
@@ -549,8 +547,6 @@ static const struct drm_crtc_funcs via_iga2_funcs = {
 	.gamma_set = via_iga2_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = via_crtc_destroy,
-	.enable_vblank = via_enable_vblank,
-	.disable_vblank = via_disable_vblank,
 };
 
 static void via_load_vpit_regs(struct via_device *dev_priv)
@@ -1816,8 +1812,6 @@ via_iga1_crtc_disable(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
 
-	drm_crtc_vblank_off(crtc);
-
 	/* Turn off the cursor */
 	via_hide_cursor(crtc);
 
@@ -1831,8 +1825,6 @@ via_iga1_crtc_prepare(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
 
-	drm_crtc_vblank_off(crtc);
-
 	/* Blank the screen */
 	if (crtc->enabled)
 		via_iga1_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -1845,8 +1837,6 @@ via_iga1_crtc_commit(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
 
-	drm_crtc_vblank_on(crtc);
-
 	/* Turn on the monitor */
 	if (crtc->enabled)
 		via_iga1_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
@@ -2098,8 +2088,6 @@ via_iga2_crtc_disable(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
 
-	drm_crtc_vblank_off(crtc);
-
 	/* Turn off the cursor */
 	via_hide_cursor(crtc);
 
@@ -2113,8 +2101,6 @@ via_iga2_crtc_prepare(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
 
-	drm_crtc_vblank_off(crtc);
-
 	/* Blank the screen */
 	if (crtc->enabled)
 		via_iga2_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -2127,8 +2113,6 @@ via_iga2_crtc_commit(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
 
-	drm_crtc_vblank_on(crtc);
-
 	/* Turn on the monitor */
 	if (crtc->enabled)
 		via_iga2_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/openchrome/openchrome_dma.h b/drivers/gpu/drm/openchrome/openchrome_dma.h
deleted file mode 100644
index a6912bdbdbd0..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_dma.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* openchrome_dma.h -- PCI DMA BitBlt support for VIA Unichrome/Pro
- *
- * Copyright 2005 Thomas Hellstrom.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Thomas Hellstrom.
- *    Register info from Digeo Inc.
- */
-
-#ifndef _VIA_DMA_H
-#define _VIA_DMA_H
-
-#include <linux/dma-mapping.h>
-
-#define VIA_NUM_BLIT_ENGINES 2
-#define VIA_NUM_BLIT_SLOTS 8
-
-#define VIA_OUT_RING_H1(nReg, nData) {				\
-	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
-	*((uint32_t *)(vb) + 1) = (nData);			\
-	vb = ((uint32_t *)vb) + 2;				\
-	dev_priv->dma_low += 8;					\
-}
-
-/* For H5/6 2D cmd load(Two value :cmd with address)***/
-#define VIA_OUT_RING_QW(w1, w2) do {		\
-	*vb++ = (w1);				\
-	*vb++ = (w2);				\
-	dev_priv->dma_low += 8;			\
-} while (0)
-
-struct via_h1_header {
-	uint32_t mem_addr;
-	uint32_t dev_addr;
-	uint32_t size;
-	uint32_t next;
-};
-
-struct drm_via_sg_info {
-	struct ttm_tt *ttm;
-	unsigned long **desc_pages;
-	int num_desc_pages;
-	int num_desc;
-	enum dma_data_direction direction;
-	unsigned long dev_start;
-	dma_addr_t chain_start;
-	unsigned int descriptors_per_page;
-	int aborted;
-	enum {
-		dr_via_device_mapped,
-		dr_via_desc_pages_alloc,
-		dr_via_pages_locked,
-		dr_via_pages_alloc,
-		dr_via_sg_init
-	} state;
-};
-
-/*
- *  PCI DMA Registers
- *  Channels 2 & 3 don't seem to be implemented in hardware.
- */
-
-#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */
-
-/* Define for DMA engine */
-/* DPR */
-#define VIA_DMA_DPR_EC		(1<<1)	/* end of chain */
-#define VIA_DMA_DPR_DDIE	(1<<2)	/* descriptor done interrupt enable */
-#define VIA_DMA_DPR_DT		(1<<3)	/* direction of transfer (RO) */
-
-/* MR */
-#define VIA_DMA_MR_CM		(1<<0)	/* chaining mode */
-#define VIA_DMA_MR_TDIE		(1<<1)	/* transfer done interrupt enable */
-#define VIA_DMA_MR_HENDMACMD		(1<<7) /* ? */
-
-/* CSR */
-#define VIA_DMA_CSR_DE		(1<<0)	/* DMA enable */
-#define VIA_DMA_CSR_TS		(1<<1)	/* transfer start */
-#define VIA_DMA_CSR_TA		(1<<2)	/* transfer abort */
-#define VIA_DMA_CSR_TD		(1<<3)	/* transfer done */
-#define VIA_DMA_CSR_DD		(1<<4)	/* descriptor done */
-#define VIA_DMA_DPR_EC          (1<<1)  /* end of chain */
-
-#endif
diff --git a/drivers/gpu/drm/openchrome/openchrome_drv.c b/drivers/gpu/drm/openchrome/openchrome_drv.c
index 505e546d7375..d8db079c8805 100644
--- a/drivers/gpu/drm/openchrome/openchrome_drv.c
+++ b/drivers/gpu/drm/openchrome/openchrome_drv.c
@@ -472,25 +472,12 @@ static void via_driver_unload(struct drm_device *dev)
 {
 	struct via_device *dev_priv = dev->dev_private;
 	struct ttm_buffer_object *bo;
-	int ret = 0;
 
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
 
-	ret = via_dma_cleanup(dev);
-	if (ret)
-		return;
-
-	drm_irq_uninstall(dev);
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		via_modeset_fini(dev);
 
-	via_fence_pool_fini(dev_priv->dma_fences);
-
-	/* destroy work queue. */
-	if (dev_priv->wq)
-		destroy_workqueue(dev_priv->wq);
-
 	bo = dev_priv->vq.bo;
 	if (bo) {
 		via_bo_unpin(bo, &dev_priv->vq);
@@ -546,8 +533,6 @@ static int via_driver_load(struct drm_device *dev,
 	dev_priv->vram_mtrr = -ENXIO;
 	dev_priv->dev = dev;
 
-	via_init_command_verifier();
-
 	ret = via_device_init(dev_priv);
 	if (ret) {
 		DRM_ERROR("Failed to initialize Chrome IGP.\n");
@@ -604,26 +589,6 @@ static int via_driver_load(struct drm_device *dev,
 
 	via_engine_init(dev);
 
-	/* Setting up a work queue. */
-	dev_priv->wq = create_workqueue("viadrm");
-	if (!dev_priv->wq) {
-		DRM_ERROR("Failed to create a work queue.\n");
-		ret = -EINVAL;
-		goto init_error;
-	}
-
-	ret = drm_vblank_init(dev, 2);
-	if (ret) {
-		DRM_ERROR("Failed to initialize DRM VBlank.\n");
-		goto init_error;
-	}
-
-	ret = via_dmablit_init(dev);
-	if (ret) {
-		DRM_ERROR("Failed to initialize DMA.\n");
-		goto init_error;
-	}
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = via_modeset_init(dev);
 		if (ret) {
@@ -632,12 +597,6 @@ static int via_driver_load(struct drm_device *dev,
 		}
 	}
 
-	ret = drm_irq_install(dev, dev->pdev->irq);
-	if (ret) {
-		DRM_ERROR("Failed to initialize DRM IRQ.\n");
-		goto init_error;
-	}
-
 	goto exit;
 init_error:
 	if (ret)
@@ -647,22 +606,6 @@ exit:
 	return ret;
 }
 
-static int via_final_context(struct drm_device *dev, int context)
-{
-	DRM_DEBUG_KMS("Entered %s.\n", __func__);
-
-	/* Linux specific until context tracking code gets ported to BSD */
-	/* Last context, perform cleanup */
-	if (dev->dev_private) {
-		DRM_DEBUG_KMS("Last Context\n");
-		drm_irq_uninstall(dev);
-		via_dma_cleanup(dev);
-	}
-
-	DRM_DEBUG_KMS("Exiting %s.\n", __func__);
-	return 1;
-}
-
 static void via_driver_lastclose(struct drm_device *dev)
 {
 	DRM_DEBUG_KMS("Entered %s.\n", __func__);
@@ -674,19 +617,6 @@ static void via_driver_lastclose(struct drm_device *dev)
 	DRM_DEBUG_KMS("Exiting %s.\n", __func__);
 }
 
-static void via_reclaim_buffers_locked(struct drm_device *dev,
-					struct drm_file *filp)
-{
-	DRM_DEBUG_KMS("Entered %s.\n", __func__);
-
-	mutex_lock(&dev->struct_mutex);
-	via_driver_dma_quiescent(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	DRM_DEBUG_KMS("Exiting %s.\n", __func__);
-	return;
-}
-
 static int via_pm_ops_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -828,13 +758,6 @@ static struct drm_driver via_driver = {
 				DRIVER_IRQ_SHARED | DRIVER_GEM,
 	.load = via_driver_load,
 	.unload = via_driver_unload,
-	.preclose = via_reclaim_buffers_locked,
-	.context_dtor = via_final_context,
-	.irq_preinstall = via_driver_irq_preinstall,
-	.irq_postinstall = via_driver_irq_postinstall,
-	.irq_uninstall = via_driver_irq_uninstall,
-	.irq_handler = via_driver_irq_handler,
-	.dma_quiescent = via_driver_dma_quiescent,
 	.lastclose = via_driver_lastclose,
 	.gem_open_object = ttm_gem_open_object,
 	.gem_free_object = ttm_gem_free_object,
diff --git a/drivers/gpu/drm/openchrome/openchrome_drv.h b/drivers/gpu/drm/openchrome/openchrome_drv.h
index 312f659efb63..adce322b9ceb 100644
--- a/drivers/gpu/drm/openchrome/openchrome_drv.h
+++ b/drivers/gpu/drm/openchrome/openchrome_drv.h
@@ -51,17 +51,10 @@
 #include <drm/via_drm.h>
 
 #include "openchrome_display.h"
-#include "openchrome_dma.h"
-#include "openchrome_fence.h"
 #include "openchrome_regs.h"
-#include "openchrome_verifier.h"
 
 #define VIA_MM_ALIGN_SIZE	16
 
-#define VIA_PCI_BUF_SIZE	60000
-#define VIA_FIRE_BUF_SIZE	1024
-#define VIA_NUM_IRQS		4
-
 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
 #define CLE266_REVISION_AX	0x0A
@@ -87,20 +80,6 @@
 #define VX900_REVISION_A2	0x02
 #define VX900_REVISION_A3	0x03
 
-typedef uint32_t maskarray_t[5];
-
-typedef struct drm_via_irq {
-	atomic_t irq_received;
-	uint32_t pending_mask;
-	uint32_t enable_mask;
-	wait_queue_head_t irq_queue;
-} drm_via_irq_t;
-
-struct sgdma_tt {
-	struct ttm_dma_tt sgdma;
-	unsigned long offset;
-};
-
 struct via_state {
 	struct vga_regset crt_regs[256];
 	struct vga_regset seq_regs[256];
@@ -149,7 +128,6 @@ struct via_device {
 
 	int revision;
 
-	struct ttm_bo_kmap_obj dmabuf;
 	struct ttm_bo_kmap_obj gart;
 	struct ttm_bo_kmap_obj vq;
 
@@ -166,31 +144,6 @@ struct via_device {
 	struct via_state pm_cache;
 
 	enum via_engine engine_type;
-	struct drm_via_state hc_state;
-	unsigned int dma_low;
-	unsigned int dma_high;
-	unsigned int dma_offset;
-	uint32_t dma_diff;
-	uint32_t dma_wrap;
-	void __iomem *last_pause_ptr;
-	void __iomem *hw_addr_ptr;
-
-	char pci_buf[VIA_PCI_BUF_SIZE];
-	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
-	uint32_t num_fire_offsets;
-
-	drm_via_irq_t via_irqs[VIA_NUM_IRQS];
-	struct work_struct hotplug_work;
-	struct workqueue_struct *wq;
-	unsigned num_irqs;
-	maskarray_t *irq_masks;
-	uint32_t irq_enable_mask;
-	uint32_t irq_pending_mask;
-	int *irq_map;
-
-	/* fence handling */
-	struct via_fence_pool *dma_fences;
-	int desc_size;
 
 	struct via_crtc iga[2];
 	bool spread_spectrum;
@@ -313,42 +266,10 @@ extern int via_hdmi_audio;
 
 extern void via_engine_init(struct drm_device *dev);
 
-extern int via_dma_init(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-extern int via_flush_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int via_dispatch_cmdbuffer(struct drm_device *dev,
-					drm_via_cmdbuffer_t *cmd);
-extern int via_cmdbuffer(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int via_cmdbuf_size(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int via_pci_cmdbuffer(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int via_wait_irq(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-extern int via_wait_idle(struct via_device *dev_priv);
-
 extern int via_vram_detect(struct via_device *dev_priv);
 extern int openchrome_vram_init(struct via_device *dev_priv);
 extern void openchrome_vram_fini(struct via_device *dev_priv);
 
-extern int via_enable_vblank(struct drm_crtc *crtc);
-extern void via_disable_vblank(struct drm_crtc *crtc);
-
-extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
-extern void via_driver_irq_preinstall(struct drm_device *dev);
-extern int via_driver_irq_postinstall(struct drm_device *dev);
-extern void via_driver_irq_uninstall(struct drm_device *dev);
-
-extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(struct drm_device *dev);
-extern int via_dma_cleanup(struct drm_device *dev);
-
-extern void via_dmablit_handler(struct drm_device *dev,
-				int engine, int from_irq);
-extern int via_dmablit_init(struct drm_device *dev);
-
 extern int via_mm_init(struct via_device *dev_priv);
 void via_mm_fini(struct drm_device *dev);
 extern void ttm_placement_from_domain(struct ttm_buffer_object *bo,
@@ -390,10 +311,6 @@ extern struct drm_gem_object* ttm_gem_create(struct drm_device *dev,
 extern struct ttm_buffer_object* ttm_gem_mapping(
 					struct drm_gem_object *obj);
 
-extern struct ttm_tt* via_sgdma_backend_init(
-					struct ttm_buffer_object *bo,
-					uint32_t page_flags);
-
 void openchrome_transmitter_io_pad_state(struct via_device *dev_priv,
 				uint32_t di_port, bool io_pad_on);
 void openchrome_transmitter_clock_drive_strength(
diff --git a/drivers/gpu/drm/openchrome/openchrome_fence.c b/drivers/gpu/drm/openchrome/openchrome_fence.c
deleted file mode 100644
index 307240a44d08..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_fence.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright 2013 James Simmons <jsimmons at infradead.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S) OR COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#include <linux/random.h>
-#include "drmP.h"
-
-#include "openchrome_drv.h"
-
-static void
-via_fence_destroy(struct kref *kref)
-{
-	struct via_fence *fence = container_of(kref, struct via_fence, kref);
-
-	if (fence->pool->fence_cleanup)
-		fence->pool->fence_cleanup(fence);
-	kfree(fence);
-}
-
-struct via_fence *
-via_fence_create_and_emit(struct via_fence_pool *pool, void *data,
-				unsigned int engine)
-{
-	struct via_fence *fence = NULL;
-
-	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (fence) {
-		unsigned long flags;
-		int ret = -EINVAL;
-
-		fence->timeout = jiffies + 3 * HZ;
-		fence->engine = engine;
-		fence->pool = pool;
-		fence->priv = data;
-		kref_init(&fence->kref);
-
-		if (engine >= pool->num_engines) {
-			via_fence_unref((void **) &fence);
-			return ERR_PTR(-ENXIO);
-		}
-		spin_lock_irqsave(&pool->lock, flags);
-try_again:
-		/* I like to use get_random_init but it is not exported :-( */
-		get_random_bytes(&fence->seq.key, 3);
-		/* For the small change you get a zero */
-		if (unlikely(fence->seq.key == 0))
-			goto try_again;
-
-		ret = drm_ht_insert_item_rcu(&pool->pending, &fence->seq);
-		if (unlikely(ret))
-			goto try_again;
-
-		ret = pool->fence_emit(fence);
-		if (ret) {
-			DRM_INFO("Failed to emit fence\n");
-			drm_ht_remove_item_rcu(&pool->pending, &fence->seq);
-			via_fence_unref((void **) &fence);
-			fence = ERR_PTR(ret);
-		}
-		spin_unlock_irqrestore(&pool->lock, flags);
-	}
-	return fence;
-}
-
-static void
-via_fence_work(struct work_struct *work)
-{
-	struct via_fence_engine *eng = container_of(work, struct via_fence_engine,
-							fence_work);
-	unsigned long seq = readl(eng->read_seq), flags;
-	struct via_fence_pool *pool = eng->pool;
-	struct drm_hash_item *hash = NULL;
-	int ret;
-
-	spin_lock_irqsave(&eng->pool->lock, flags);
-	ret = drm_ht_find_item_rcu(&pool->pending, seq, &hash);
-	if (likely(ret == 0)) {
-		ret = drm_ht_remove_item_rcu(&pool->pending, hash);
-		if (ret < 0)
-			DRM_DEBUG("Failed to remove seq %lx\n", seq);
-	}
-	if (eng->pool->fence_signaled)
-		eng->pool->fence_signaled(eng);
-	spin_unlock_irqrestore(&eng->pool->lock, flags);
-}
-
-static bool
-via_fence_seq_signaled(struct via_fence *fence, u64 seq)
-{
-	struct drm_hash_item *key;
-	unsigned long flags;
-	bool ret = false;
-
-	/* If the fence is no longer pending then its signaled */
-	spin_lock_irqsave(&fence->pool->lock, flags);
-	if (drm_ht_find_item_rcu(&fence->pool->pending, seq, &key))
-		ret = true;
-	spin_unlock_irqrestore(&fence->pool->lock, flags);
-	return ret;
-}
-
-/* TTM fence methods */
-bool
-via_fence_signaled(void *sync_obj)
-{
-	struct via_fence *fence = sync_obj;
-
-	if (!fence || !fence->seq.key)
-		return true;
-
-	if (via_fence_seq_signaled(fence, fence->seq.key)) {
-		fence->seq.key = 0;
-		return true;
-	}
-	return false;
-}
-
-int
-via_fence_wait(void *sync_obj, bool lazy, bool interruptible)
-{
-	struct via_fence *fence = sync_obj;
-	int ret = 0;
-
-	while (!via_fence_seq_signaled(fence, fence->seq.key)) {
-		if (time_after(jiffies, fence->timeout)) {
-			DRM_INFO("The fence wait timeout timeout = %lu, jiffies = %lu.\n",
-				fence->timeout, jiffies);
-			ret = -EBUSY;
-			break;
-		}
-
-		set_current_state(interruptible ? TASK_INTERRUPTIBLE :
-						TASK_UNINTERRUPTIBLE);
-
-		if (interruptible && signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
-	}
-	set_current_state(TASK_RUNNING);
-	return ret;
-}
-
-int
-via_fence_flush(void *sync_obj)
-{
-	return 0;
-}
-
-void
-via_fence_unref(void **sync_obj)
-{
-	struct via_fence *fence = *sync_obj;
-
-	*sync_obj = NULL;
-	if (fence)
-		kref_put(&fence->kref, &via_fence_destroy);
-}
-
-void *
-via_fence_ref(void *sync_obj)
-{
-	struct via_fence *fence = sync_obj;
-
-	kref_get(&fence->kref);
-	return sync_obj;
-}
-
-/* We assert 30 * sizeof(uint32_t) is enough for emit fence sequence */
-#define FENCE_CMD_BUFFER (256 * sizeof(uint32_t))
-
-struct via_fence_pool *
-via_fence_pool_init(struct drm_device *dev, char *name, int domain,
-			int num_engines)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	struct via_fence_pool *pool = NULL;
-	int ret = 0, size, i;
-	void *par = NULL;
-
-	size = sizeof(*pool) + num_engines * sizeof(*pool->engines);
-	pool = kzalloc(size, GFP_KERNEL);
-	if (!pool)
-		return ERR_PTR(-ENOMEM);
-
-	/* allocate fence sync bo */
-	ret = via_ttm_allocate_kernel_buffer(&dev_priv->ttm.bdev, PAGE_SIZE, 16,
-					domain, &pool->fence_sync);
-	if (unlikely(ret)) {
-		DRM_ERROR("allocate fence sync bo error.\n");
-		goto out_err;
-	}
-	ret = -ENOMEM;
-
-	pool->cmd_buffer = kzalloc(FENCE_CMD_BUFFER, GFP_KERNEL);
-	if (!pool->cmd_buffer)
-		goto out_err;
-
-	spin_lock_init(&pool->lock);
-	pool->num_engines = num_engines;
-	pool->dev = dev;
-
-	if (domain == TTM_PL_FLAG_TT) {
-		pool->bus_addr = dma_map_page(dev->dev, pool->fence_sync.bo->ttm->pages[0],
-						0, PAGE_SIZE, DMA_BIDIRECTIONAL);
-		par = pool->fence_sync.virtual;
-	} else if (domain == TTM_PL_FLAG_VRAM) {
-		pool->bus_addr = dma_map_single(dev->dev, pool->cmd_buffer,
-						FENCE_CMD_BUFFER, DMA_TO_DEVICE);
-		par = pool->cmd_buffer;
-	}
-
-	for (i = 0; i < pool->num_engines; i++) {
-		struct via_fence_engine *eng = &pool->engines[i];
-
-		INIT_WORK(&eng->fence_work, via_fence_work);
-		eng->read_seq = par + VIA_FENCE_SIZE * i;
-		eng->pool = pool;
-		eng->index = i;
-	}
-
-	pool->fence_wq = alloc_workqueue(name, 0, 0);
-	if (!pool->fence_wq)
-		goto out_err;
-
-	ret = drm_ht_create(&pool->pending, 12);
-out_err:
-	if (ret) {
-		via_fence_pool_fini(pool);
-		pool = ERR_PTR(ret);
-	}
-	return pool;
-}
-
-void
-via_fence_pool_fini(struct via_fence_pool *pool)
-{
-	struct ttm_buffer_object *sync_bo;
-	int i;
-
-	if (!pool)
-		return;
-
-	drm_ht_remove(&pool->pending);
-
-	flush_workqueue(pool->fence_wq);
-	destroy_workqueue(pool->fence_wq);
-
-	for (i = 0; i < pool->num_engines; i++)
-		cancel_work_sync(&pool->engines[i].fence_work);
-
-	kfree(pool->cmd_buffer);
-
-	sync_bo = pool->fence_sync.bo;
-	if (sync_bo) {
-		via_bo_unpin(sync_bo, &pool->fence_sync);
-		ttm_bo_unref(&sync_bo);
-	}
-
-	if (pool->bus_addr)
-		dma_unmap_page(pool->dev->dev, pool->bus_addr, PAGE_SIZE,
-				DMA_BIDIRECTIONAL);
-	kfree(pool);
-}
diff --git a/drivers/gpu/drm/openchrome/openchrome_fence.h b/drivers/gpu/drm/openchrome/openchrome_fence.h
deleted file mode 100644
index 188343223c35..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_fence.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2013 James Simmons <jsimmons at infradead.org>
- *
- * Influenced by sample code from VIA Technologies and the radeon driver.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S) OR COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _VIA_FENCE_H_
-#define _VIA_FENCE_H_
-
-#define VIA_FENCE_SIZE 32
-
-struct via_fence;
-
-struct via_fence_engine {
-	struct work_struct fence_work;
-	struct via_fence_pool *pool;
-
-	/* virtual address for getting seq value */
-	void *read_seq;
-
-	/* which engine we are */
-	int index;
-};
-
-struct via_fence_pool {
-	struct ttm_bo_kmap_obj fence_sync;
-	/* BUS address used for fencing */
-	dma_addr_t bus_addr;
-
-	/* for access synchronization */
-	spinlock_t lock;
-
-	/* Fence command bounce buffer */
-	uint32_t *cmd_buffer;
-
-	struct workqueue_struct *fence_wq;
-	struct drm_open_hash pending;
-	struct drm_device *dev;
-
-	void (*fence_signaled)(struct via_fence_engine *eng);
-	void (*fence_cleanup)(struct via_fence *fence);
-	int (*fence_emit)(struct via_fence *fence);
-
-	unsigned int num_engines;
-	struct via_fence_engine engines[];
-};
-
-struct via_fence {
-	/* Which fence pool (DMA or other), this fence is associated with */
-	struct via_fence_pool *pool;
-	/* the sequence number that the fence object emit,
-	 * stored in a hash key */
-	struct drm_hash_item seq;
-	/* the time to wait for the fence object signal */
-	unsigned long timeout;
-	/* Which engine this belongs too */
-	int engine;
-	/* the reference information of this fence object */
-	struct kref kref;
-	/* place holder for special data specific to fence type */
-	void *priv;
-};
-
-extern bool via_fence_signaled(void *sync_obj);
-extern int via_fence_wait(void *sync_obj, bool lazy, bool interruptible);
-extern int via_fence_flush(void *sync_obj);
-extern void via_fence_unref(void **sync_obj);
-extern void *via_fence_ref(void *sync_obj);
-
-extern struct via_fence *
-via_fence_create_and_emit(struct via_fence_pool *pool, void *data,
-				unsigned int engine);
-
-extern struct via_fence_pool *
-via_fence_pool_init(struct drm_device *dev, char *name, int domain,
-			int num_engines);
-extern void via_fence_pool_fini(struct via_fence_pool *pool);
-
-#endif
diff --git a/drivers/gpu/drm/openchrome/openchrome_h1_cmdbuf.c b/drivers/gpu/drm/openchrome/openchrome_h1_cmdbuf.c
deleted file mode 100644
index 85a7a0ea77ae..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_h1_cmdbuf.c
+++ /dev/null
@@ -1,666 +0,0 @@
-/* via_h1_cmdbuf.c -- DMA support for the VIA Unichrome/Pro
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
- * All Rights Reserved.
- *
- * Copyright 2004 The Unichrome project.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Tungsten Graphics,
- *    Erdi Chen,
- *    Thomas Hellstrom.
- */
-
-#include <drm/drmP.h>
-
-#include "openchrome_3d_reg.h"
-#include "openchrome_drv.h"
-
-#define CMDBUF_ALIGNMENT_SIZE   (0x100)
-#define CMDBUF_ALIGNMENT_MASK   (0x0ff)
-
-static void via_cmdbuf_start(struct via_device *dev_priv);
-static void via_cmdbuf_pause(struct via_device *dev_priv);
-static void via_cmdbuf_reset(struct via_device *dev_priv);
-static void via_cmdbuf_rewind(struct via_device *dev_priv);
-static void via_pad_cache(struct via_device *dev_priv, int qwords);
-
-/*
- * Free space in command buffer.
- */
-
-static uint32_t via_cmdbuf_space(struct via_device *dev_priv)
-{
-	uint32_t gart_base = dev_priv->dma_offset;
-	uint32_t hw_addr = ioread32(dev_priv->hw_addr_ptr) - gart_base;
-
-	return ((hw_addr <= dev_priv->dma_low) ?
-		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
-		(hw_addr - dev_priv->dma_low));
-}
-
-/*
- * How much does the command regulator lag behind?
- */
-
-static uint32_t via_cmdbuf_lag(struct via_device *dev_priv)
-{
-	uint32_t gart_base = dev_priv->dma_offset;
-	uint32_t hw_addr = ioread32(dev_priv->hw_addr_ptr) - gart_base;
-
-	return ((hw_addr <= dev_priv->dma_low) ?
-		(dev_priv->dma_low - hw_addr) :
-		(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
-}
-
-/*
- * Check that the given size fits in the buffer, otherwise wait.
- */
-
-static inline int
-via_cmdbuf_wait(struct via_device *dev_priv, unsigned int size)
-{
-	uint32_t gart_base = dev_priv->dma_offset;
-	uint32_t cur_addr, hw_addr, next_addr;
-	volatile uint32_t *hw_addr_ptr;
-	uint32_t count;
-	hw_addr_ptr = dev_priv->hw_addr_ptr;
-	cur_addr = dev_priv->dma_low;
-	next_addr = cur_addr + size + 512 * 1024;
-	count = 1000000;
-	do {
-		hw_addr = *hw_addr_ptr - gart_base;
-		if (count-- == 0) {
-			DRM_ERROR
-			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
-			     hw_addr, cur_addr, next_addr);
-			return -1;
-		}
-		if  ((cur_addr < hw_addr) && (next_addr >= hw_addr))
-			usleep_range(500, 2000);
-	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
-	return 0;
-}
-
-/*
- * Checks whether buffer head has reach the end. Rewind the ring buffer
- * when necessary.
- *
- * Returns virtual pointer to ring buffer.
- */
-
-static inline uint32_t *via_check_dma(struct via_device *dev_priv,
-				      unsigned int size)
-{
-	if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
-	    dev_priv->dma_high) {
-		via_cmdbuf_rewind(dev_priv);
-	}
-	if (via_cmdbuf_wait(dev_priv, size) != 0)
-		return NULL;
-
-	return (uint32_t *) (dev_priv->dmabuf.virtual + dev_priv->dma_low);
-}
-
-int via_dma_cleanup(struct drm_device *dev)
-{
-	if (dev->dev_private) {
-		struct via_device *dev_priv = dev->dev_private;
-
-		if (dev_priv->dmabuf.virtual) {
-			struct ttm_buffer_object *bo = dev_priv->dmabuf.bo;
-
-			via_cmdbuf_reset(dev_priv);
-
-			via_bo_unpin(bo, &dev_priv->dmabuf);
-			ttm_bo_unref(&bo);
-			dev_priv->dmabuf.virtual = NULL;
-		}
-	}
-	return 0;
-}
-
-static int via_initialize(struct drm_device *dev,
-			  struct via_device *dev_priv,
-			  drm_via_dma_init_t *init)
-{
-	struct ttm_buffer_object *bo;
-	int ret = -EFAULT;
-
-	if (!dev_priv) {
-		DRM_ERROR("via_dma_init called before via_map_init\n");
-		return ret;
-	}
-
-	if (dev_priv->dmabuf.virtual != NULL) {
-		DRM_ERROR("called again without calling cleanup\n");
-		return ret;
-	}
-
-	ret = via_bo_create(&dev_priv->ttm.bdev, &bo, init->size,
-				ttm_bo_type_kernel, TTM_PL_FLAG_TT,
-				VIA_MM_ALIGN_SIZE, PAGE_SIZE,
-				false, NULL, NULL);
-	if (!ret) {
-		ret = via_bo_pin(bo, &dev_priv->dmabuf);
-		if (ret)
-			goto out_err;
-	}
-
-	dev_priv->dma_low = 0;
-	dev_priv->dma_high = bo->num_pages << PAGE_SHIFT;
-	dev_priv->dma_wrap = dev_priv->dma_high;
-	dev_priv->dma_offset = bo->offset;
-	dev_priv->last_pause_ptr = NULL;
-	dev_priv->hw_addr_ptr =
-		(void *)(dev_priv->mmio + init->reg_pause_addr);
-
-	via_cmdbuf_start(dev_priv);
-out_err:
-	if (ret)
-		DRM_ERROR("can not ioremap TTM DMA ring buffer\n");
-	return ret;
-}
-
-int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	drm_via_dma_init_t *init = data;
-	int retcode = 0;
-
-	switch (init->func) {
-	case VIA_INIT_DMA:
-		if (!capable(CAP_SYS_ADMIN))
-			retcode = -EPERM;
-		else
-			retcode = via_initialize(dev, dev_priv, init);
-		break;
-	case VIA_CLEANUP_DMA:
-		if (!capable(CAP_SYS_ADMIN))
-			retcode = -EPERM;
-		else
-			retcode = via_dma_cleanup(dev);
-		break;
-	case VIA_DMA_INITIALIZED:
-		retcode = (dev_priv->dmabuf.virtual != NULL) ?
-			0 : -EFAULT;
-		break;
-	default:
-		retcode = -EINVAL;
-		break;
-	}
-
-	return retcode;
-}
-
-int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	uint32_t *vb;
-	int ret;
-
-	if (dev_priv->dmabuf.virtual == NULL) {
-		DRM_ERROR("called without initializing AGP ring buffer.\n");
-		return -EFAULT;
-	}
-
-	if (cmd->size > VIA_PCI_BUF_SIZE)
-		return -ENOMEM;
-
-	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
-		return -EFAULT;
-
-	/*
-	 * Running this function on AGP memory is dead slow. Therefore
-	 * we run it on a temporary cacheable system memory buffer and
-	 * copy it to AGP memory when ready.
-	 */
-	ret = via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
-					cmd->size, dev, 1);
-	if (ret)
-		return ret;
-
-	vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
-	if (vb == NULL)
-		return -EAGAIN;
-
-	memcpy(vb, dev_priv->pci_buf, cmd->size);
-
-	dev_priv->dma_low += cmd->size;
-
-	/*
-	 * Small submissions somehow stalls the CPU. (AGP cache effects?)
-	 * pad to greater size.
-	 */
-
-	if (cmd->size < 0x100)
-		via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
-	via_cmdbuf_pause(dev_priv);
-
-	return 0;
-}
-
-int via_driver_dma_quiescent(struct drm_device *dev)
-{
-	struct via_device *dev_priv = dev->dev_private;
-
-	if (!via_wait_idle(dev_priv))
-		return -EBUSY;
-	return 0;
-}
-
-int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-
-	//LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	return via_driver_dma_quiescent(dev);
-}
-
-int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	drm_via_cmdbuffer_t *cmdbuf = data;
-	int ret;
-
-	//LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
-	ret = via_dispatch_cmdbuffer(dev, cmdbuf);
-	return ret;
-}
-
-static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
-				      drm_via_cmdbuffer_t *cmd)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	int ret;
-
-	if (cmd->size > VIA_PCI_BUF_SIZE)
-		return -ENOMEM;
-	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
-		return -EFAULT;
-
-	ret = via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
-					cmd->size, dev, 0);
-	if (ret)
-		return ret;
-
-	ret =
-	    via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
-				     cmd->size);
-	return ret;
-}
-
-int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	drm_via_cmdbuffer_t *cmdbuf = data;
-	int ret;
-
-	//LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
-	ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
-	return ret;
-}
-
-static inline uint32_t *via_align_buffer(struct via_device *dev_priv,
-					 uint32_t *vb, int qw_count)
-{
-	for (; qw_count > 0; --qw_count)
-		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
-	return vb;
-}
-
-/*
- * This function is used internally by ring buffer management code.
- *
- * Returns virtual pointer to ring buffer.
- */
-static inline uint32_t *via_get_dma(struct via_device *dev_priv)
-{
-	return (uint32_t *) (dev_priv->dmabuf.virtual + dev_priv->dma_low);
-}
-
-/*
- * Hooks a segment of data into the tail of the ring-buffer by
- * modifying the pause address stored in the buffer itself. If
- * the regulator has already paused, restart it.
- */
-static int via_hook_segment(struct via_device *dev_priv,
-			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
-			    int no_pci_fire)
-{
-	int paused, count;
-	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
-	uint32_t reader, ptr;
-	uint32_t diff;
-
-	paused = 0;
-	mb();
-	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
-
-	*paused_at = pause_addr_lo;
-	mb();
-	(void) *paused_at;
-
-	reader = ioread32(dev_priv->hw_addr_ptr);
-	ptr = ((volatile void *)paused_at - dev_priv->dmabuf.virtual) +
-		dev_priv->dma_offset + 4;
-
-	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
-
-	/*
-	 * If there is a possibility that the command reader will
-	 * miss the new pause address and pause on the old one,
-	 * In that case we need to program the new start address
-	 * using PCI.
-	 */
-
-	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
-	count = 10000000;
-
-	while ((diff < CMDBUF_ALIGNMENT_SIZE) && count--) {
-		paused = (VIA_READ(0x41c) & 0x80000000);
-		if (paused)
-			break;
-		reader = ioread32(dev_priv->hw_addr_ptr);
-		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
-	}
-
-	paused = VIA_READ(0x41c) & 0x80000000;
-	if (paused && !no_pci_fire) {
-		reader = ioread32(dev_priv->hw_addr_ptr);
-		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
-		diff &= (dev_priv->dma_high - 1);
-		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
-			DRM_ERROR("Paused at incorrect address. "
-				  "0x%08x, 0x%08x 0x%08x\n",
-				  ptr, reader, dev_priv->dma_diff);
-		} else if (diff == 0) {
-			/*
-			 * There is a concern that these writes may stall the PCI bus
-			 * if the GPU is not idle. However, idling the GPU first
-			 * doesn't make a difference.
-			 */
-
-			VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
-			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
-			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
-			VIA_READ(VIA_REG_TRANSPACE);
-		}
-	}
-	return paused;
-}
-
-int via_wait_idle(struct via_device *dev_priv)
-{
-	int count = 10000000;
-
-	while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_EMPTY) && --count)
-		;
-
-	while (count && (VIA_READ(VIA_REG_STATUS) &
-			   (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
-			    VIA_3D_ENG_BUSY)))
-		--count;
-	return count;
-}
-
-static uint32_t *via_align_cmd(struct via_device *dev_priv, uint32_t cmd_type,
-			       uint32_t addr, uint32_t *cmd_addr_hi,
-			       uint32_t *cmd_addr_lo, int skip_wait)
-{
-	uint32_t gart_base;
-	uint32_t cmd_addr, addr_lo, addr_hi;
-	uint32_t *vb;
-	uint32_t qw_pad_count;
-
-	if (!skip_wait)
-		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
-
-	vb = via_get_dma(dev_priv);
-	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
-			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
-
-	gart_base = dev_priv->dma_offset;
-	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
-	    ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
-
-	cmd_addr = (addr) ? addr :
-	    gart_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
-	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
-		   (cmd_addr & HC_HAGPBpL_MASK));
-	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
-
-	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
-	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
-	return vb;
-}
-
-static void via_cmdbuf_start(struct via_device *dev_priv)
-{
-	uint32_t pause_addr_lo, pause_addr_hi;
-	uint32_t start_addr, start_addr_lo;
-	uint32_t end_addr, end_addr_lo;
-	uint32_t command;
-	uint32_t gart_base;
-	uint32_t ptr;
-	uint32_t reader;
-	int count;
-
-	dev_priv->dma_low = 0;
-
-	gart_base = dev_priv->dma_offset;
-	start_addr = gart_base;
-	end_addr = gart_base + dev_priv->dma_high;
-
-	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
-	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
-	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
-		   ((end_addr & 0xff000000) >> 16));
-
-	dev_priv->last_pause_ptr =
-	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
-			  &pause_addr_hi, &pause_addr_lo, 1) - 1;
-
-	mb();
-	(void) *(volatile uint32_t *) dev_priv->last_pause_ptr;
-
-	VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
-	VIA_WRITE(VIA_REG_TRANSPACE, command);
-	VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
-	VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
-
-	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
-	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
-	wmb();
-	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
-	VIA_READ(VIA_REG_TRANSPACE);
-
-	dev_priv->dma_diff = 0;
-
-	count = 10000000;
-	while (!(VIA_READ(0x41c) & 0x80000000) && count--);
-
-	reader = ioread32(dev_priv->hw_addr_ptr);
-	ptr = ((volatile void *)dev_priv->last_pause_ptr - dev_priv->dmabuf.virtual) +
-		dev_priv->dma_offset + 4;
-
-	/*
-	 * This is the difference between where we tell the
-	 * command reader to pause and where it actually pauses.
-	 * This differs between hw implementation so we need to
-	 * detect it.
-	 */
-
-	dev_priv->dma_diff = ptr - reader;
-}
-
-static void via_pad_cache(struct via_device *dev_priv, int qwords)
-{
-	uint32_t *vb;
-
-	via_cmdbuf_wait(dev_priv, qwords + 2);
-	vb = via_get_dma(dev_priv);
-	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
-	via_align_buffer(dev_priv, vb, qwords);
-}
-
-static inline void via_dummy_bitblt(struct via_device *dev_priv)
-{
-	uint32_t *vb = via_get_dma(dev_priv);
-	VIA_OUT_RING_H1(0x0C, (0 | (0 << 16)));
-	VIA_OUT_RING_H1(0x10, 0 | (0 << 16));
-	VIA_OUT_RING_H1(0x0, 0x1 | 0x2000 | 0xAA000000);
-}
-
-static void via_cmdbuf_rewind(struct via_device *dev_priv)
-{
-	uint32_t pause_addr_lo, pause_addr_hi;
-	uint32_t jump_addr_lo, jump_addr_hi;
-	volatile uint32_t *last_pause_ptr;
-	uint32_t dma_low_save1, dma_low_save2;
-
-	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
-		      &jump_addr_lo, 0);
-
-	dev_priv->dma_wrap = dev_priv->dma_low;
-
-	/*
-	 * Wrap command buffer to the beginning.
-	 */
-
-	dev_priv->dma_low = 0;
-	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
-		DRM_ERROR("via_cmdbuf_rewind failed\n");
-
-	via_dummy_bitblt(dev_priv);
-	via_dummy_bitblt(dev_priv);
-
-	last_pause_ptr =
-	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
-			  &pause_addr_lo, 0) - 1;
-	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
-		      &pause_addr_lo, 0);
-
-	*last_pause_ptr = pause_addr_lo;
-	dma_low_save1 = dev_priv->dma_low;
-
-	/*
-	 * Now, set a trap that will pause the regulator if it tries to rerun the old
-	 * command buffer. (Which may happen if via_hook_segment detecs a command regulator pause
-	 * and reissues the jump command over PCI, while the regulator has already taken the jump
-	 * and actually paused at the current buffer end).
-	 * There appears to be no other way to detect this condition, since the hw_addr_pointer
-	 * does not seem to get updated immediately when a jump occurs.
-	 */
-
-	last_pause_ptr =
-		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
-			      &pause_addr_lo, 0) - 1;
-	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
-		      &pause_addr_lo, 0);
-	*last_pause_ptr = pause_addr_lo;
-
-	dma_low_save2 = dev_priv->dma_low;
-	dev_priv->dma_low = dma_low_save1;
-	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
-	dev_priv->dma_low = dma_low_save2;
-	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-static void via_cmdbuf_flush(struct via_device *dev_priv, uint32_t cmd_type)
-{
-	uint32_t pause_addr_lo, pause_addr_hi;
-
-	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
-	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-static void via_cmdbuf_pause(struct via_device *dev_priv)
-{
-	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
-}
-
-static void via_cmdbuf_reset(struct via_device *dev_priv)
-{
-	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
-	via_wait_idle(dev_priv);
-}
-
-/*
- * User interface to the space and lag functions.
- */
-
-int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	drm_via_cmdbuf_size_t *d_siz = data;
-	int ret = 0;
-	uint32_t tmp_size, count;
-
-	DRM_DEBUG("\n");
-	//LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	if (dev_priv->dmabuf.virtual == NULL) {
-		DRM_ERROR("called without initializing AGP ring buffer.\n");
-		return -EFAULT;
-	}
-
-	count = 1000000;
-	tmp_size = d_siz->size;
-	switch (d_siz->func) {
-	case VIA_CMDBUF_SPACE:
-		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
-		       && --count) {
-			if (!d_siz->wait)
-				break;
-		}
-		if (!count) {
-			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
-			ret = -EAGAIN;
-		}
-		break;
-	case VIA_CMDBUF_LAG:
-		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
-		       && --count) {
-			if (!d_siz->wait)
-				break;
-		}
-		if (!count) {
-			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
-			ret = -EAGAIN;
-		}
-		break;
-	default:
-		ret = -EFAULT;
-	}
-	d_siz->size = tmp_size;
-
-	return ret;
-}
diff --git a/drivers/gpu/drm/openchrome/openchrome_h1_dma.c b/drivers/gpu/drm/openchrome/openchrome_h1_dma.c
deleted file mode 100644
index 925dde99080b..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_h1_dma.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/* via_h1_dma.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
- * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Thomas Hellstrom.
- *    Partially based on code obtained from Digeo Inc.
- */
-
-#include <drm/drmP.h>
-
-#include "openchrome_drv.h"
-
-
-/*
- * Fire a blit engine.
- */
-static void
-via_h1_fire_dmablit(struct drm_device *dev, struct drm_via_sg_info *vsg, int engine)
-{
-	struct via_device *dev_priv = dev->dev_private;
-
-	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine * 0x10, 0);
-	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine * 0x10, 0);
-	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
-		  VIA_DMA_CSR_DE);
-	VIA_WRITE(VIA_PCI_DMA_MR0  + engine * 0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
-	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine * 0x10, 0);
-	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine * 0x10, vsg->chain_start);
-	wmb();
-	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
-	VIA_READ(VIA_PCI_DMA_CSR0  + engine * 0x04);
-}
-
-#if 0
-static void
-via_abort_dmablit(struct drm_device *dev, int engine)
-{
-	struct via_device *dev_priv = dev->dev_private;
-
-	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_TA);
-}
-
-static void
-via_dmablit_engine_off(struct drm_device *dev, int engine)
-{
-	struct via_device *dev_priv = dev->dev_private;
-
-	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
-}
-#endif
-
-static void
-via_dmablit_done(struct drm_device *dev, int engine)
-{
-	struct via_device *dev_priv = dev->dev_private;
-
-	/* Clear transfer done flag. */
-	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04,  VIA_DMA_CSR_TD);
-}
-
-/*
- * Unmap a DMA mapping.
- */
-static void
-via_unmap_from_device(struct drm_device *dev, struct drm_via_sg_info *vsg)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	int num_desc = vsg->num_desc;
-	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
-	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
-	dma_addr_t next = vsg->chain_start;
-	struct via_h1_header *desc_ptr;
-
-	desc_ptr = (struct via_h1_header *) vsg->desc_pages[cur_descriptor_page] +
-						descriptor_this_page;
-	while (num_desc--) {
-		if (descriptor_this_page-- == 0) {
-			cur_descriptor_page--;
-			descriptor_this_page = vsg->descriptors_per_page - 1;
-			desc_ptr = (struct via_h1_header *) vsg->desc_pages[cur_descriptor_page] +
-							descriptor_this_page;
-		}
-
-		dma_unmap_single(dev->dev, next, dev_priv->desc_size, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
-		next = (dma_addr_t) desc_ptr->next;
-		desc_ptr--;
-	}
-}
-
-/*
- * Map the DMA pages for the device, put together and map also the descriptors. Descriptors
- * are run in reverse order by the hardware because we are not allowed to update the
- * 'next' field without syncing calls when the descriptor is already mapped.
- */
-static int
-via_map_for_device(struct via_fence_engine *eng, struct drm_via_sg_info *vsg,
-			unsigned long offset)
-{
-	unsigned int num_descriptors_this_page = 0, cur_descriptor_page = 0;
-	unsigned long dev_start = eng->pool->fence_sync.bo->offset;
-	struct device *dev = eng->pool->dev->dev;
-	dma_addr_t next = VIA_DMA_DPR_EC;
-	struct via_h1_header *desc_ptr;
-	struct ttm_tt *ttm = vsg->ttm;
-	int num_desc = 0, ret = 0;
-
-	desc_ptr = (struct via_h1_header *) vsg->desc_pages[cur_descriptor_page];
-	dev_start = vsg->dev_start;
-
-	for (num_desc = 0; num_desc < ttm->num_pages; num_desc++) {
-		/* Map system pages */
-		if (!ttm->pages[num_desc]) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		desc_ptr->mem_addr = dma_map_page(dev, ttm->pages[num_desc], 0,
-						PAGE_SIZE, vsg->direction);
-		desc_ptr->dev_addr = dev_start;
-		/* size count in 16 bytes */
-		desc_ptr->size = PAGE_SIZE / 16;
-		desc_ptr->next = (uint32_t) next;
-
-		/* Map decriptors for Chaining mode */
-		next = dma_map_single(dev, desc_ptr, sizeof(*desc_ptr), DMA_TO_DEVICE);
-		desc_ptr++;
-		if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
-			num_descriptors_this_page = 0;
-			desc_ptr = (struct via_h1_header *) vsg->desc_pages[++cur_descriptor_page];
-		}
-		dev_start += PAGE_SIZE;
-	}
-
-	vsg->chain_start = next;
-	vsg->state = dr_via_device_mapped;
-out:
-	return ret;
-}
-
-/*
- * Function that frees up all resources for a blit. It is usable even if the
- * blit info has only been partially built as long as the status enum is consistent
- * with the actual status of the used resources.
- */
-static void
-via_free_sg_info(struct via_fence *fence)
-{
-	struct drm_device *dev = fence->pool->dev;
-	struct drm_via_sg_info *vsg = fence->priv;
-	int i;
-
-	switch (vsg->state) {
-	case dr_via_device_mapped:
-		via_unmap_from_device(dev, vsg);
-	case dr_via_desc_pages_alloc:
-		for (i = 0; i < vsg->num_desc_pages; ++i) {
-			if (vsg->desc_pages[i])
-				free_page((unsigned long)vsg->desc_pages[i]);
-		}
-		kfree(vsg->desc_pages);
-	default:
-		vsg->state = dr_via_sg_init;
-	}
-}
-
-static void
-via_h1_dma_fence_signaled(struct via_fence_engine *eng)
-{
-	via_dmablit_done(eng->pool->dev, eng->index);
-}
-
-/*
- * Build all info and do all mappings required for a blit.
- */
-static int
-via_h1_dma_emit(struct via_fence *fence)
-{
-	struct via_fence_engine *eng = &fence->pool->engines[fence->engine];
-	unsigned long offset = VIA_FENCE_SIZE * eng->index;
-	struct drm_via_sg_info *vsg = fence->priv;
-	int ret = 0;
-
-	ret = via_map_for_device(eng, vsg, offset);
-	if (!ret) {
-		writel(fence->seq.key, eng->read_seq);
-		via_h1_fire_dmablit(fence->pool->dev, vsg, fence->engine);
-	}
-	return ret;
-}
-
-/*
- * Init all blit engines. Currently we use two, but some hardware have 4.
- */
-int
-via_dmablit_init(struct drm_device *dev)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	struct via_fence_pool *pool;
-
-	pci_set_master(dev->pdev);
-
-	pool = via_fence_pool_init(dev, "viadrm_dma", TTM_PL_FLAG_VRAM, 4);
-	if (IS_ERR(pool))
-		return PTR_ERR(pool);
-
-	pool->fence_signaled = via_h1_dma_fence_signaled;
-	pool->fence_cleanup = via_free_sg_info;
-	pool->fence_emit = via_h1_dma_emit;
-
-	dev_priv->dma_fences = pool;
-	dev_priv->desc_size = sizeof(struct via_h1_header);
-	return 0;
-}
diff --git a/drivers/gpu/drm/openchrome/openchrome_ioc32.c b/drivers/gpu/drm/openchrome/openchrome_ioc32.c
index a7850df54203..4a5fa6626a5d 100644
--- a/drivers/gpu/drm/openchrome/openchrome_ioc32.c
+++ b/drivers/gpu/drm/openchrome/openchrome_ioc32.c
@@ -175,8 +175,6 @@ KMS_INVALID_IOCTL(via_agp_init)
 KMS_INVALID_IOCTL(via_fb_init)
 KMS_INVALID_IOCTL(via_map_init)
 KMS_INVALID_IOCTL(via_decoder_futex)
-KMS_INVALID_IOCTL(via_dma_blit)
-KMS_INVALID_IOCTL(via_dma_blit_sync)
 
 const struct drm_ioctl_desc via_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
@@ -186,14 +184,6 @@ const struct drm_ioctl_desc via_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH | DRM_MASTER),
 	DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(VIA_OLD_GEM_CREATE, via_gem_alloc, DRM_AUTH | DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(VIA_GETPARAM, via_getparam, DRM_AUTH | DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(VIA_SETPARAM, via_setparam, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(VIA_GEM_CREATE, via_gem_alloc, DRM_AUTH | DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/openchrome/openchrome_irq.c b/drivers/gpu/drm/openchrome/openchrome_irq.c
deleted file mode 100644
index deb574cfaae2..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_irq.c
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Copyright 2004 BEAM Ltd.
- * Copyright 2002 Tungsten Graphics, Inc.
- * Copyright 2005 Thomas Hellstrom.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S) OR COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Terry Barnaby <terry1 at beam.ltd.uk>
- *    Keith Whitwell <keith at tungstengraphics.com>
- *    Thomas Hellstrom <unichrome at shipmail.org>
- *
- * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
- * interrupt, as well as an infrastructure to handle other interrupts of the chip.
- * The refresh rate is also calculated for video playback sync purposes.
- */
-
-#include "drmP.h"
-
-#include "openchrome_drv.h"
-
-
-/* HW Interrupt Register Setting */
-#define INTERRUPT_CTRL_REG1		0x200
-
-/* mmio 0x200 IRQ enable and status bits. */
-#define VIA_IRQ_ALL_ENABLE		BIT(31)
-
-#define VIA_IRQ_IGA1_VBLANK_STATUS	BIT(1)
-
-#define VIA_IRQ_IGA1_VSYNC_ENABLE	BIT(19)
-#define VIA_IRQ_IGA2_VSYNC_ENABLE	BIT(17)
-#define VIA_IRQ_IGA1_VSYNC_STATUS	BIT(3)
-#define VIA_IRQ_IGA2_VSYNC_STATUS	BIT(15)
-
-#define VIA_IRQ_CAPTURE0_ACTIVE_ENABLE	BIT(28)
-#define VIA_IRQ_CAPTURE1_ACTIVE_ENABLE	BIT(24)
-#define VIA_IRQ_CAPTURE0_ACTIVE_STATUS	BIT(12)
-#define VIA_IRQ_CAPTURE1_ACTIVE_STATUS	BIT(8)
-
-#define VIA_IRQ_HQV0_ENABLE		BIT(25)
-#define VIA_IRQ_HQV1_ENABLE		BIT(9)
-#define VIA_IRQ_HQV0_STATUS		BIT(12)
-#define VIA_IRQ_HQV1_STATUS		BIT(10)
-
-#define VIA_IRQ_DMA0_DD_ENABLE		BIT(20)
-#define VIA_IRQ_DMA0_TD_ENABLE		BIT(21)
-#define VIA_IRQ_DMA1_DD_ENABLE		BIT(22)
-#define VIA_IRQ_DMA1_TD_ENABLE		BIT(23)
-
-#define VIA_IRQ_DMA0_DD_STATUS		BIT(4)
-#define VIA_IRQ_DMA0_TD_STATUS		BIT(5)
-#define VIA_IRQ_DMA1_DD_STATUS		BIT(6)
-#define VIA_IRQ_DMA1_TD_STATUS		BIT(7)
-
-#define VIA_IRQ_LVDS_ENABLE		BIT(30)
-#define VIA_IRQ_TMDS_ENABLE		BIT(16)
-
-#define VIA_IRQ_LVDS_STATUS		BIT(27)
-#define VIA_IRQ_TMDS_STATUS		BIT(0)
-
-#define INTR_ENABLE_MASK (VIA_IRQ_DMA0_TD_ENABLE | VIA_IRQ_DMA1_TD_ENABLE | \
-			VIA_IRQ_DMA0_DD_ENABLE | VIA_IRQ_DMA1_DD_ENABLE | \
-			VIA_IRQ_IGA1_VSYNC_ENABLE | VIA_IRQ_IGA2_VSYNC_ENABLE)
-
-#define INTERRUPT_ENABLE_MASK (VIA_IRQ_CAPTURE0_ACTIVE_ENABLE | VIA_IRQ_CAPTURE1_ACTIVE_ENABLE | \
-				VIA_IRQ_HQV0_ENABLE | VIA_IRQ_HQV1_ENABLE | \
-				INTR_ENABLE_MASK)
-
-#define INTR_STATUS_MASK (VIA_IRQ_DMA0_TD_STATUS | VIA_IRQ_DMA1_TD_STATUS | \
-			VIA_IRQ_DMA0_DD_STATUS  | VIA_IRQ_DMA1_DD_STATUS  | \
-			VIA_IRQ_IGA1_VSYNC_STATUS | VIA_IRQ_IGA2_VSYNC_STATUS)
-
-#define INTERRUPT_STATUS_MASK (VIA_IRQ_CAPTURE0_ACTIVE_STATUS | VIA_IRQ_CAPTURE1_ACTIVE_STATUS | \
-				VIA_IRQ_HQV0_STATUS | VIA_IRQ_HQV1_STATUS | \
-				INTR_STATUS_MASK)
-
-/* mmio 0x1280 IRQ enable and status bits. */
-#define INTERRUPT_CTRL_REG3		0x1280
-
-/* MM1280[9], internal TMDS interrupt status = SR3E[6] */
-#define INTERRUPT_TMDS_STATUS		0x200
-/* MM1280[30], internal TMDS interrupt control = SR3E[7] */
-#define INTERNAL_TMDS_INT_CONTROL	0x40000000
-
-#define VIA_IRQ_DP1_ENABLE		BIT(24)
-#define VIA_IRQ_DP2_ENABLE		BIT(26)
-#define VIA_IRQ_IN_TMDS_ENABLE		BIT(30)
-#define VIA_IRQ_CRT_ENABLE		BIT(20)
-
-#define VIA_IRQ_DP1_STATUS		BIT(11)
-#define VIA_IRQ_DP2_STATUS		BIT(13)
-#define VIA_IRQ_IN_TMDS_STATUS		BIT(9)
-#define VIA_IRQ_CRT_STATUS		BIT(4)
-
-/*
- * Device-specific IRQs go here. This type might need to be extended with
- * the register if there are multiple IRQ control registers.
- * Currently we activate the HQV interrupts of Unichrome Pro group A.
- */
-
-static maskarray_t via_unichrome_irqs[] = {
-	{ VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_STATUS, VIA_PCI_DMA_CSR0,
-	  VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
-	{ VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_STATUS, VIA_PCI_DMA_CSR1,
-	  VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
-};
-static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
-static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
-
-static maskarray_t via_pro_group_a_irqs[] = {
-	{ VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_STATUS, 0x000003D0, 0x00008010,
-	  0x00000000 },
-	{ VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_STATUS, 0x000013D0, 0x00008010,
-	  0x00000000 },
-	{ VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_STATUS, VIA_PCI_DMA_CSR0,
-	  VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008 },
-	{ VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_STATUS, VIA_PCI_DMA_CSR1,
-	  VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008 },
-};
-static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
-static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
-
-static irqreturn_t
-via_hpd_irq_process(struct via_device *dev_priv)
-{
-	uint32_t mm_1280 = VIA_READ(0x1280);
-	uint32_t mm_c730, mm_c7b0;
-	irqreturn_t ret = IRQ_NONE;
-
-	/* CRT sense */
-	if (mm_1280 & VIA_IRQ_CRT_ENABLE) {
-		if (mm_1280 & VIA_IRQ_CRT_STATUS) {
-			DRM_DEBUG("VIA_IRQ_CRT_HOT_PLUG!\n");
-		}
-	}
-
-	/* DP1 or Internal HDMI sense */
-	if (mm_1280 & VIA_IRQ_DP1_ENABLE) {
-		if (mm_1280 & VIA_IRQ_DP1_STATUS) {
-			mm_c730 = VIA_READ(0xc730);
-
-			switch (mm_c730 & 0xC0000000) {
-			case VIA_IRQ_DP_HOT_IRQ:
-				DRM_DEBUG("VIA_IRQ_DP1_HOT_IRQ!\n");
-				break;
-
-			case VIA_IRQ_DP_HOT_UNPLUG:
-				DRM_DEBUG("VIA_IRQ_DP1(HDMI)_HOT_UNPLUG!\n");
-				break;
-
-			case VIA_IRQ_DP_HOT_PLUG:
-				DRM_DEBUG("VIA_IRQ_DP1(HDMI)_HOT_PLUG!\n");
-				break;
-
-			case VIA_IRQ_DP_NO_INT:
-				DRM_DEBUG("VIA_IRQ_DP1_NO_INT!\n");
-				break;
-			}
-			ret = IRQ_HANDLED;
-		}
-	}
-
-	/* DP2 sense */
-	if (mm_1280 & VIA_IRQ_DP2_ENABLE) {
-		if (mm_1280 & VIA_IRQ_DP2_STATUS) {
-			mm_c7b0 = VIA_READ(0xc7b0);
-
-			switch (mm_c7b0 & 0xC0000000) {
-			case VIA_IRQ_DP_HOT_IRQ:
-				DRM_DEBUG("VIA_IRQ_DP2_HOT_IRQ!\n");
-				break;
-
-			case VIA_IRQ_DP_HOT_UNPLUG:
-				DRM_DEBUG("VIA_IRQ_DP2_HOT_UNPLUG!\n");
-				break;
-
-			case VIA_IRQ_DP_HOT_PLUG:
-				DRM_DEBUG("VIA_IRQ_DP2_HOT_PLUG!\n");
-				break;
-
-			case VIA_IRQ_DP_NO_INT:
-				DRM_DEBUG("VIA_IRQ_DP2_NO_INT!\n");
-				break;
-			}
-			ret = IRQ_HANDLED;
-		}
-	}
-
-	/* internal TMDS sense */
-	if ((dev_priv->dev->pdev->device != PCI_DEVICE_ID_VIA_VX875) ||
-	    (dev_priv->dev->pdev->device != PCI_DEVICE_ID_VIA_VX900_VGA)) {
-		if (VIA_IRQ_IN_TMDS_ENABLE & mm_1280) {
-			if (VIA_IRQ_IN_TMDS_STATUS & mm_1280) {
-				ret = IRQ_HANDLED;
-			}
-		}
-	}
-
-	/* clear interrupt status on 0x1280. */
-	VIA_WRITE(0x1280, mm_1280);
-
-	if (ret == IRQ_HANDLED)
-		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-	return ret;
-}
-
-irqreturn_t via_driver_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = (struct drm_device *) arg;
-	struct via_device *dev_priv = dev->dev_private;
-	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
-	u32 status = VIA_READ(INTERRUPT_CTRL_REG1);
-	irqreturn_t ret = IRQ_NONE;
-	int i;
-
-	/* Handle hot plug if KMS available */
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		ret = via_hpd_irq_process(dev_priv);
-
-	if (status & VIA_IRQ_IGA1_VSYNC_STATUS) {
-		drm_handle_vblank(dev, 0);
-		ret = IRQ_HANDLED;
-	}
-
-	if (status & VIA_IRQ_IGA2_VSYNC_STATUS) {
-		drm_handle_vblank(dev, 1);
-		ret = IRQ_HANDLED;
-	}
-
-	for (i = 0; i < dev_priv->num_irqs; ++i) {
-		if (status & cur_irq->pending_mask) {
-			struct via_fence_engine *eng = NULL;
-
-			atomic_inc(&cur_irq->irq_received);
-			wake_up(&cur_irq->irq_queue);
-			ret = IRQ_HANDLED;
-
-			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
-				eng = &dev_priv->dma_fences->engines[0];
-			else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
-				eng = &dev_priv->dma_fences->engines[1];
-
-			if (eng)
-				queue_work(eng->pool->fence_wq, &eng->fence_work);
-		}
-		cur_irq++;
-	}
-
-	/* Acknowledge interrupts */
-	VIA_WRITE(INTERRUPT_CTRL_REG1, status);
-	return ret;
-}
-
-int via_enable_vblank(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct via_crtc *iga = container_of(crtc, struct via_crtc, base);
-	struct via_device *dev_priv = dev->dev_private;
-	u32 status;
-
-	status = VIA_READ(INTERRUPT_CTRL_REG1);
-	if (iga->index) {
-		status |= VIA_IRQ_IGA2_VSYNC_ENABLE | VIA_IRQ_IGA2_VSYNC_STATUS;
-	} else {
-		status |= VIA_IRQ_IGA1_VSYNC_ENABLE | VIA_IRQ_IGA1_VSYNC_STATUS;
-	}
-
-	svga_wcrt_mask(VGABASE, 0xF3, 0, BIT(1));
-	svga_wcrt_mask(VGABASE, 0x11, BIT(4), BIT(4));
-
-	VIA_WRITE(INTERRUPT_CTRL_REG1, status);
-	return 0;
-}
-
-void via_disable_vblank(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct via_crtc *iga = container_of(crtc, struct via_crtc, base);
-	struct via_device *dev_priv = dev->dev_private;
-	u32 status;
-
-	status = VIA_READ(INTERRUPT_CTRL_REG1);
-	if (iga->index) {
-		status &= ~VIA_IRQ_IGA2_VSYNC_ENABLE;
-	} else {
-		status &= ~VIA_IRQ_IGA1_VSYNC_ENABLE;
-	}
-
-	VIA_WRITE(INTERRUPT_CTRL_REG1, status);
-}
-
-/**
- * When we set the IRQ mask enable bit, the IRQ status bit is enabled as
- * well, whether or not a device is connected, which would trigger the
- * interrupt immediately. Therefore, write 1 to clear the status bit when
- * enabling the IRQ mask.
- */
-void
-via_hpd_irq_state(struct via_device *dev_priv, bool enable)
-{
-	uint32_t mask = BIT(7) | BIT(5) | BIT(3) | BIT(1);
-	uint32_t value = (enable ? mask : 0);
-	uint32_t mm_1280 = VIA_READ(0x1280);
-	uint32_t mm_200 = VIA_READ(0x200);
-
-	/* Turn off/on DVI sense [7], LVDS sense [5], CRT sense [3],
-	 * and CRT hotplug [1] */
-	svga_wseq_mask(VGABASE, 0x2B, value, mask);
-
-	/* Handle external LVDS */
-	mask = VIA_IRQ_LVDS_ENABLE | VIA_IRQ_LVDS_STATUS;
-	/* Handle external TMDS on DVP1 port */
-	mask |= VIA_IRQ_TMDS_ENABLE | VIA_IRQ_TMDS_STATUS;
-
-	if (enable)
-		mm_200 |= mask;
-	else
-		mm_200 &= ~mask;
-
-	/**
-	 * Only when 0x200[31] = 1 can these IRQs be triggered.
-	 */
-	mask = VIA_IRQ_CRT_ENABLE | VIA_IRQ_CRT_STATUS;
-
-	if ((dev_priv->dev->pdev->device != PCI_DEVICE_ID_VIA_VX875) ||
-	    (dev_priv->dev->pdev->device != PCI_DEVICE_ID_VIA_VX900_VGA)) {
-		/* Internal DVI - DFPL port */
-		mask |= VIA_IRQ_IN_TMDS_ENABLE | VIA_IRQ_IN_TMDS_STATUS;
-	} else {
-		/* For both HDMI encoder and DisplayPort */
-		mask |= VIA_IRQ_DP1_ENABLE | VIA_IRQ_DP1_STATUS;
-		mask |= VIA_IRQ_DP2_ENABLE | VIA_IRQ_DP2_STATUS;
-	}
-
-	if (enable)
-		mm_1280 |= mask;
-	else
-		mm_1280 &= ~mask;
-
-	VIA_WRITE(0x1280, mm_1280);
-	VIA_WRITE(0x200, mm_200);
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-static void
-via_hotplug_work_func(struct work_struct *work)
-{
-	struct via_device *dev_priv = container_of(work,
-		struct via_device, hotplug_work);
-	struct drm_device *dev = dev_priv->dev;
-
-	DRM_DEBUG("Sending Hotplug event\n");
-
-	/* Fire off a uevent and let userspace tell us what to do */
-	drm_helper_hpd_irq_event(dev);
-}
-
-void
-via_driver_irq_preinstall(struct drm_device *dev)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	drm_via_irq_t *cur_irq;
-	u32 status;
-	int i;
-
-	cur_irq = dev_priv->via_irqs;
-
-	if (dev_priv->engine_type != VIA_ENG_H1) {
-		dev_priv->irq_masks = via_pro_group_a_irqs;
-		dev_priv->num_irqs = via_num_pro_group_a;
-		dev_priv->irq_map = via_irqmap_pro_group_a;
-
-		dev_priv->irq_pending_mask = INTR_STATUS_MASK;
-		dev_priv->irq_enable_mask = INTR_ENABLE_MASK;
-	} else {
-		dev_priv->irq_masks = via_unichrome_irqs;
-		dev_priv->num_irqs = via_num_unichrome;
-		dev_priv->irq_map = via_irqmap_unichrome;
-
-		dev_priv->irq_pending_mask = INTERRUPT_STATUS_MASK;
-		dev_priv->irq_enable_mask = INTERRUPT_ENABLE_MASK;
-	}
-
-	for (i = 0; i < dev_priv->num_irqs; ++i) {
-		atomic_set(&cur_irq->irq_received, 0);
-		cur_irq->enable_mask = dev_priv->irq_masks[i][0];
-		cur_irq->pending_mask = dev_priv->irq_masks[i][1];
-		init_waitqueue_head(&cur_irq->irq_queue);
-		cur_irq++;
-
-		DRM_DEBUG("Initializing IRQ %d\n", i);
-	}
-
-	/* Clear VSync interrupt regs */
-	status = VIA_READ(INTERRUPT_CTRL_REG1);
-	VIA_WRITE(INTERRUPT_CTRL_REG1, status & ~(dev_priv->irq_enable_mask));
-
-	/* Acknowledge interrupts */
-	status = VIA_READ(INTERRUPT_CTRL_REG1);
-	VIA_WRITE(INTERRUPT_CTRL_REG1, status | dev_priv->irq_pending_mask);
-
-	/* Clear hotplug settings */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		dev_priv->irq_pending_mask |= VIA_IRQ_TMDS_STATUS | VIA_IRQ_LVDS_STATUS;
-		dev_priv->irq_enable_mask |= VIA_IRQ_TMDS_ENABLE | VIA_IRQ_LVDS_ENABLE;
-
-		INIT_WORK(&dev_priv->hotplug_work, via_hotplug_work_func);
-
-		via_hpd_irq_state(dev_priv, true);
-
-		status = via_hpd_irq_process(dev_priv);
-	}
-}
-
-int
-via_driver_irq_postinstall(struct drm_device *dev)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	u32 status = VIA_READ(INTERRUPT_CTRL_REG1);
-
-	VIA_WRITE(INTERRUPT_CTRL_REG1, status | VIA_IRQ_ALL_ENABLE |
-			dev_priv->irq_enable_mask);
-	return 0;
-}
-
-void
-via_driver_irq_uninstall(struct drm_device *dev)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	u32 status;
-
-	/* Some more magic, oh for some data sheets! */
-	VIA_WRITE8(0x83d4, 0x11);
-	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
-
-	status = VIA_READ(INTERRUPT_CTRL_REG1);
-	VIA_WRITE(INTERRUPT_CTRL_REG1, status &
-		  ~(VIA_IRQ_IGA1_VSYNC_ENABLE | dev_priv->irq_enable_mask));
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		via_hpd_irq_state(dev_priv, false);
-}
-
-static int
-via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
-		    unsigned int *sequence)
-{
-	struct via_device *dev_priv = dev->dev_private;
-	unsigned int cur_irq_sequence;
-	drm_via_irq_t *cur_irq;
-	int ret = 0;
-	maskarray_t *masks;
-	int real_irq;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	if (irq >= drm_via_irq_num) {
-		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
-		return -EINVAL;
-	}
-
-	real_irq = dev_priv->irq_map[irq];
-
-	if (real_irq < 0) {
-		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
-			  irq);
-		return -EINVAL;
-	}
-
-	masks = dev_priv->irq_masks;
-	cur_irq = dev_priv->via_irqs + real_irq;
-
-	if (masks[real_irq][2] && !force_sequence) {
-		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
-			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
-			     masks[irq][4]));
-		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
-	} else {
-		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
-			    (((cur_irq_sequence =
-			       atomic_read(&cur_irq->irq_received)) -
-			      *sequence) <= (1 << 23)));
-	}
-	*sequence = cur_irq_sequence;
-	return ret;
-}
-
-int
-via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	drm_via_irqwait_t *irqwait = data;
-	struct timeval now;
-	int ret = 0;
-	struct via_device *dev_priv = dev->dev_private;
-	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
-	int force_sequence;
-
-	if (irqwait->request.irq >= dev_priv->num_irqs) {
-		DRM_ERROR("Trying to wait on unknown irq %d\n",
-			  irqwait->request.irq);
-		return -EINVAL;
-	}
-
-	cur_irq += irqwait->request.irq;
-
-	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
-	case VIA_IRQ_RELATIVE:
-		irqwait->request.sequence +=
-			atomic_read(&cur_irq->irq_received);
-		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
-	case VIA_IRQ_ABSOLUTE:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
-		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
-		return -EINVAL;
-	}
-
-	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
-
-	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
-				  &irqwait->request.sequence);
-	do_gettimeofday(&now);
-	irqwait->reply.tval_sec = now.tv_sec;
-	irqwait->reply.tval_usec = now.tv_usec;
-
-	return ret;
-}
diff --git a/drivers/gpu/drm/openchrome/openchrome_sgdma.c b/drivers/gpu/drm/openchrome/openchrome_sgdma.c
deleted file mode 100644
index 352de056ab90..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_sgdma.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2012 James Simmons <jsimmons at infradead.org>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "openchrome_drv.h"
-
-
-static int
-via_pcie_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
-{
-	struct sgdma_tt *dma_tt = (struct sgdma_tt *) ttm;
-	struct via_device *dev_priv =
-		container_of(ttm->bdev, struct via_device, ttm.bdev);
-	int i;
-
-	/* Disable gart table HW protect */
-	svga_wseq_mask(VGABASE, 0x6C, 0x00, BIT(7));
-
-	/* Update the relevant entries */
-	dma_tt->offset = mem->start << PAGE_SHIFT;
-	for (i = 0; i < ttm->num_pages; i++) {
-		writel(page_to_pfn(ttm->pages[i]) & 0x3FFFFFFF,
-			dev_priv->gart.virtual + dma_tt->offset + i);
-	}
-
-	/* Invalidate GTI cache */
-	svga_wseq_mask(VGABASE, 0x6F, BIT(7), BIT(7));
-
-	/* Enable gart table HW protect */
-	svga_wseq_mask(VGABASE, 0x6C, BIT(7), BIT(7));
-	return 1;
-}
-
-static int
-via_pcie_sgdma_unbind(struct ttm_tt *ttm)
-{
-	struct sgdma_tt *dma_tt = (struct sgdma_tt *) ttm;
-	struct via_device *dev_priv =
-		container_of(ttm->bdev, struct via_device, ttm.bdev);
-	int i;
-
-	if (ttm->state != tt_bound)
-		return 0;
-
-	/* Disable gart table HW protect */
-	svga_wseq_mask(VGABASE, 0x6C, 0x00, BIT(7));
-
-	/* Update the relevant entries */
-	for (i = 0; i < ttm->num_pages; i++)
-		writel(0x80000000, dev_priv->gart.virtual + dma_tt->offset + i);
-	dma_tt->offset = 0;
-
-	/* Invalidate GTI cache */
-	svga_wseq_mask(VGABASE, 0x6F, BIT(7), BIT(7));
-
-	/* Enable gart table HW protect */
-	svga_wseq_mask(VGABASE, 0x6C, BIT(7), BIT(7));
-	return 0;
-}
-
-static void
-via_sgdma_destroy(struct ttm_tt *ttm)
-{
-	struct sgdma_tt *dma_tt = (struct sgdma_tt *) ttm;
-
-	if (ttm) {
-		ttm_dma_tt_fini(&dma_tt->sgdma);
-		kfree(dma_tt);
-	}
-}
-
-static struct ttm_backend_func ttm_sgdma_func = {
-	.bind = via_pcie_sgdma_bind,
-	.unbind = via_pcie_sgdma_unbind,
-	.destroy = via_sgdma_destroy,
-};
-
-struct ttm_tt* via_sgdma_backend_init(struct ttm_buffer_object *bo,
-					uint32_t page_flags)
-{
-	struct sgdma_tt *dma_tt;
-
-	dma_tt = kzalloc(sizeof(*dma_tt), GFP_KERNEL);
-	if (!dma_tt)
-		return NULL;
-
-	dma_tt->sgdma.ttm.func = &ttm_sgdma_func;
-
-	if (ttm_dma_tt_init(&dma_tt->sgdma, bo, page_flags)) {
-		kfree(dma_tt);
-		return NULL;
-	}
-	return &dma_tt->sgdma.ttm;
-}
-EXPORT_SYMBOL(via_sgdma_backend_init);
diff --git a/drivers/gpu/drm/openchrome/openchrome_ttm.c b/drivers/gpu/drm/openchrome/openchrome_ttm.c
index b9d449a99e32..7e627e38d4ab 100644
--- a/drivers/gpu/drm/openchrome/openchrome_ttm.c
+++ b/drivers/gpu/drm/openchrome/openchrome_ttm.c
@@ -103,101 +103,6 @@ static void via_ttm_bo_destroy(struct ttm_buffer_object *bo)
 	heap = NULL;
 }
 
-struct ttm_tt* via_ttm_tt_create(struct ttm_buffer_object *bo,
-					uint32_t page_flags)
-{
-	struct via_device *dev_priv = container_of(bo->bdev,
-					struct via_device, ttm.bdev);
-
-#if IS_ENABLED(CONFIG_AGP)
-	if (pci_find_capability(dev_priv->dev->pdev, PCI_CAP_ID_AGP)) {
-		return ttm_agp_tt_create(bo,
-					dev_priv->dev->agp->bridge,
-					page_flags);
-	}
-#endif
-
-	return via_sgdma_backend_init(bo, page_flags);
-}
-
-static int via_ttm_tt_populate(struct ttm_tt *ttm,
-				struct ttm_operation_ctx *ctx)
-{
-	struct sgdma_tt *dma_tt = (struct sgdma_tt *) ttm;
-	struct ttm_dma_tt *sgdma = &dma_tt->sgdma;
-	struct via_device *dev_priv = container_of(ttm->bdev,
-					struct via_device, ttm.bdev);
-	struct drm_device *dev = dev_priv->dev;
-	unsigned int i;
-	int ret = 0;
-
-	if (ttm->state != tt_unpopulated)
-		return 0;
-
-#if IS_ENABLED(CONFIG_AGP)
-	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
-		return ttm_agp_tt_populate(ttm, ctx);
-#endif
-
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl())
-		return ttm_dma_populate(sgdma, dev->dev, ctx);
-#endif
-
-	ret = ttm_pool_populate(ttm, ctx);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		sgdma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
-							0, PAGE_SIZE,
-							PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, sgdma->dma_address[i])) {
-			while (--i) {
-				pci_unmap_page(dev->pdev, sgdma->dma_address[i],
-						PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				sgdma->dma_address[i] = 0;
-			}
-			ttm_pool_unpopulate(ttm);
-			return -EFAULT;
-		}
-	}
-	return ret;
-}
-
-static void via_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
-	struct sgdma_tt *dma_tt = (struct sgdma_tt *) ttm;
-	struct ttm_dma_tt *sgdma = &dma_tt->sgdma;
-	struct via_device *dev_priv = container_of(ttm->bdev,
-					struct via_device, ttm.bdev);
-	struct drm_device *dev = dev_priv->dev;
-	unsigned int i;
-
-#if IS_ENABLED(CONFIG_AGP)
-	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
-		ttm_agp_tt_unpopulate(ttm);
-		return;
-	}
-#endif
-
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(sgdma, dev->dev);
-		return;
-	}
-#endif
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (sgdma->dma_address[i]) {
-			pci_unmap_page(dev->pdev, sgdma->dma_address[i],
-					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-	}
-
-	ttm_pool_unpopulate(ttm);
-}
-
 static int via_invalidate_caches(struct ttm_bo_device *bdev,
 					uint32_t flags)
 {
@@ -294,217 +199,6 @@ static void via_evict_flags(struct ttm_buffer_object *bo,
 	}
 }
 
-/*
- * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps
- * track of the pages we allocate. We don't want to use kmalloc for the descriptor
- * chain because it may be quite large for some blits, and pages don't need to be
- * contiguous.
- */
-struct drm_via_sg_info* via_alloc_desc_pages(struct ttm_tt *ttm,
-					struct drm_device *dev,
-					unsigned long dev_start,
-					enum dma_data_direction direction)
-{
-	struct drm_via_sg_info *vsg = kzalloc(sizeof(*vsg), GFP_KERNEL);
-	struct via_device *dev_priv = dev->dev_private;
-	int desc_size = dev_priv->desc_size, i;
-
-	vsg->ttm = ttm;
-	vsg->dev_start = dev_start;
-	vsg->direction = direction;
-	vsg->num_desc = ttm->num_pages;
-	vsg->descriptors_per_page = PAGE_SIZE / desc_size;
-	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
-				vsg->descriptors_per_page;
-
-	vsg->desc_pages = kzalloc(vsg->num_desc_pages * sizeof(void *),
-					GFP_KERNEL);
-	if (!vsg->desc_pages)
-		return ERR_PTR(-ENOMEM);
-
-	vsg->state = dr_via_desc_pages_alloc;
-
-	/* Alloc pages for descriptor chain */
-	for (i = 0; i < vsg->num_desc_pages; ++i) {
-		vsg->desc_pages[i] = (unsigned long *) __get_free_page(GFP_KERNEL);
-
-		if (!vsg->desc_pages[i])
-			return ERR_PTR(-ENOMEM);
-	}
-	return vsg;
-}
-
-/* Move between GART and VRAM */
-static int via_move_blit(struct ttm_buffer_object *bo,
-				bool evict, bool no_wait_gpu,
-				struct ttm_mem_reg *new_mem,
-				struct ttm_mem_reg *old_mem)
-{
-	struct via_device *dev_priv = container_of(bo->bdev,
-					struct via_device, ttm.bdev);
-	enum dma_data_direction direction = DMA_TO_DEVICE;
-	unsigned long old_start, new_start, dev_addr = 0;
-	struct drm_via_sg_info *vsg;
-	int ret = -ENXIO;
-	struct via_fence *fence;
-
-	/* Real CPU physical address */
-	old_start = (old_mem->start << PAGE_SHIFT) + old_mem->bus.base;
-	new_start = (new_mem->start << PAGE_SHIFT) + new_mem->bus.base;
-
-	if (old_mem->mem_type == TTM_PL_VRAM) {
-		direction = DMA_FROM_DEVICE;
-		dev_addr = old_start;
-	} else if (new_mem->mem_type == TTM_PL_VRAM) {
-		/* direction is DMA_TO_DEVICE */
-		dev_addr = new_start;
-	}
-
-	/* device address must be 16-byte aligned */
-	if (dev_addr & 0x0F)
-		return ret;
-
-	vsg = via_alloc_desc_pages(bo->ttm, dev_priv->dev, dev_addr,
-					direction);
-	if (unlikely(IS_ERR(vsg)))
-		return PTR_ERR(vsg);
-
-	fence = via_fence_create_and_emit(dev_priv->dma_fences, vsg, 0);
-	if (unlikely(IS_ERR(fence)))
-		return PTR_ERR(fence);
-	return ttm_bo_move_accel_cleanup(bo, (void *)fence, evict, new_mem);
-}
-
-static int via_move_from_vram(struct ttm_buffer_object *bo,
-				struct ttm_operation_ctx *ctx,
-				struct ttm_mem_reg *new_mem)
-{
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg tmp_mem;
-	struct ttm_placement placement;
-	struct ttm_place place;
-	int ret;
-
-	tmp_mem = *new_mem;
-	tmp_mem.mm_node = NULL;
-
-	place.fpfn = place.lpfn = 0;
-	place.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-
-	placement.num_busy_placement = placement.num_placement = 1;
-	placement.busy_placement = placement.placement = &place;
-
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
-	if (unlikely(ret))
-		return ret;
-
-	/* Allocate some DMA memory for the GART address space */
-	ret = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
-	if (unlikely(ret))
-		goto out_cleanup;
-
-	ret = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
-	if (unlikely(ret))
-		goto out_cleanup;
-
-	/* Move from the VRAM to GART space */
-	ret = via_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
-	if (unlikely(ret))
-		goto out_cleanup;
-
-	/* Expose the GART region to the system memory */
-	ret = ttm_bo_move_ttm(bo, ctx, new_mem);
-out_cleanup:
-	ttm_bo_mem_put(bo, &tmp_mem);
-	return ret;
-}
-
-static int via_move_to_vram(struct ttm_buffer_object *bo,
-				struct ttm_operation_ctx *ctx,
-				struct ttm_mem_reg *new_mem)
-{
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg tmp_mem;
-	struct ttm_placement placement;
-	struct ttm_place place;
-	int ret;
-
-	tmp_mem = *new_mem;
-	tmp_mem.mm_node = NULL;
-
-	place.fpfn = place.lpfn = 0;
-	place.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-
-	placement.busy_placement = placement.placement = &place;
-	placement.num_busy_placement = placement.num_placement = 1;
-
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
-	if (unlikely(ret))
-		return ret;
-
-	/* Expose the GART region to the system memory */
-	ret = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
-	if (unlikely(ret))
-		goto out_cleanup;
-
-	/* Move from the GART to VRAM */
-	ret = via_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
-out_cleanup:
-	ttm_bo_mem_put(bo, &tmp_mem);
-	return ret;
-}
-
-static int via_bo_move(struct ttm_buffer_object *bo,
-			bool evict,
-			struct ttm_operation_ctx *ctx,
-			struct ttm_mem_reg *new_mem)
-
-{
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	int ret = 0;
-
-	DRM_DEBUG_KMS("Entered %s.\n", __func__);
-
-	if ((old_mem->mem_type == TTM_PL_SYSTEM) && (!bo->ttm)) {
-		BUG_ON(old_mem->mm_node != NULL);
-		*old_mem = *new_mem;
-		new_mem->mm_node = NULL;
-		goto exit;
-	}
-
-	/* No real memory copy. Just use the new_mem
-	 * directly. */
-	if (((old_mem->mem_type == TTM_PL_SYSTEM) &&
-			(new_mem->mem_type == TTM_PL_TT)) ||
-		((old_mem->mem_type == TTM_PL_TT) &&
-			(new_mem->mem_type == TTM_PL_SYSTEM)) ||
-		(new_mem->mem_type == TTM_PL_PRIV)) {
-		BUG_ON(old_mem->mm_node != NULL);
-		*old_mem = *new_mem;
-		new_mem->mm_node = NULL;
-		goto exit;
-	}
-
-	/* Accelerated copy involving the VRAM. */
-	if ((old_mem->mem_type == TTM_PL_VRAM) &&
-		(new_mem->mem_type == TTM_PL_SYSTEM)) {
-		ret = via_move_from_vram(bo, ctx, new_mem);
-	} else if ((old_mem->mem_type == TTM_PL_SYSTEM) &&
-		(new_mem->mem_type == TTM_PL_VRAM)) {
-		ret = via_move_to_vram(bo, ctx, new_mem);
-	} else {
-		ret = via_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
-	}
-
-	if (ret) {
-		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
-	}
-
-exit:
-	DRM_DEBUG_KMS("Exiting %s.\n", __func__);
-	return ret;
-}
-
 static int via_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
 					struct ttm_mem_reg *mem)
 {
@@ -567,13 +261,9 @@ static int via_verify_access(struct ttm_buffer_object *bo,
 }
 
 static struct ttm_bo_driver via_bo_driver = {
-	.ttm_tt_create		= via_ttm_tt_create,
-	.ttm_tt_populate	= via_ttm_tt_populate,
-	.ttm_tt_unpopulate	= via_ttm_tt_unpopulate,
 	.invalidate_caches	= via_invalidate_caches,
 	.init_mem_type		= via_init_mem_type,
 	.evict_flags		= via_evict_flags,
-	.move			= via_bo_move,
 	.verify_access		= via_verify_access,
 	.io_mem_reserve		= via_ttm_io_mem_reserve,
 	.io_mem_free		= via_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/openchrome/openchrome_verifier.c b/drivers/gpu/drm/openchrome/openchrome_verifier.c
deleted file mode 100644
index 4fa554032562..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_verifier.c
+++ /dev/null
@@ -1,1139 +0,0 @@
-/*
- * Copyright 2004 The Unichrome Project. All Rights Reserved.
- * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellstrom 2004, 2005.
- * This code was written using docs obtained under NDA from VIA Inc.
- *
- * Don't run this code directly on an AGP buffer. Due to cache problems it will
- * be very slow.
- */
-
-#include <drm/drmP.h>
-#include <drm/via_drm.h>
-
-#include "openchrome_3d_reg.h"
-#include "openchrome_verifier.h"
-#include "openchrome_drv.h"
-
-
-enum verifier_state {
-	state_command,
-	state_header2,
-	state_header1,
-	state_vheader5,
-	state_vheader6,
-	state_error
-};
-
-enum hazard {
-	no_check = 0,
-	check_for_header2,
-	check_for_header1,
-	check_for_header2_err,
-	check_for_header1_err,
-	check_for_fire,
-	check_z_buffer_addr0,
-	check_z_buffer_addr1,
-	check_z_buffer_addr_mode,
-	check_destination_addr0,
-	check_destination_addr1,
-	check_destination_addr_mode,
-	check_for_dummy,
-	check_for_dd,
-	check_texture_addr0,
-	check_texture_addr1,
-	check_texture_addr2,
-	check_texture_addr3,
-	check_texture_addr4,
-	check_texture_addr5,
-	check_texture_addr6,
-	check_texture_addr7,
-	check_texture_addr8,
-	check_texture_addr_mode,
-	check_for_vertex_count,
-	check_number_texunits,
-	forbidden_command
-};
-
-/*
- * Associates each hazard above with a possible multi-command
- * sequence. For example an address that is split over multiple
- * commands and that needs to be checked at the first command
- * that does not include any part of the address.
- */
-
-static enum drm_via_sequence seqs[] = {
-	no_sequence,
-	no_sequence,
-	no_sequence,
-	no_sequence,
-	no_sequence,
-	no_sequence,
-	z_address,
-	z_address,
-	z_address,
-	dest_address,
-	dest_address,
-	dest_address,
-	no_sequence,
-	no_sequence,
-	tex_address,
-	tex_address,
-	tex_address,
-	tex_address,
-	tex_address,
-	tex_address,
-	tex_address,
-	tex_address,
-	tex_address,
-	tex_address,
-	no_sequence
-};
-
-struct hz_init {
-	unsigned int code;
-	enum hazard hz;
-};
-
-/* for attribute other than context hazard detect */
-static struct hz_init init_table1[] = {
-	{0xf2, check_for_header2_err},
-	{0xf0, check_for_header1_err},
-	{0xee, check_for_fire},
-	{0xcc, check_for_dummy},
-	{0xdd, check_for_dd},
-	{0x00, no_check},
-	{0x10, check_z_buffer_addr0},
-	{0x11, check_z_buffer_addr1},
-	{0x12, check_z_buffer_addr_mode},
-	{0x13, no_check},
-	{0x14, no_check},
-	{0x15, no_check},
-	{0x23, no_check},
-	{0x24, no_check},
-	{0x33, no_check},
-	{0x34, no_check},
-	{0x35, no_check},
-	{0x36, no_check},
-	{0x37, no_check},
-	{0x38, no_check},
-	{0x39, no_check},
-	{0x3A, no_check},
-	{0x3B, no_check},
-	{0x3C, no_check},
-	{0x3D, no_check},
-	{0x3E, no_check},
-	{0x40, check_destination_addr0},
-	{0x41, check_destination_addr1},
-	{0x42, check_destination_addr_mode},
-	{0x43, no_check},
-	{0x44, no_check},
-	{0x50, no_check},
-	{0x51, no_check},
-	{0x52, no_check},
-	{0x53, no_check},
-	{0x54, no_check},
-	{0x55, no_check},
-	{0x56, no_check},
-	{0x57, no_check},
-	{0x58, no_check},
-	{0x70, no_check},
-	{0x71, no_check},
-	{0x78, no_check},
-	{0x79, no_check},
-	{0x7A, no_check},
-	{0x7B, no_check},
-	{0x7C, no_check},
-	{0x7D, check_for_vertex_count}
-};
-
-/* for texture stage's hazard detect */
-static struct hz_init init_table2[] = {
-	{0xf2, check_for_header2_err},
-	{0xf0, check_for_header1_err},
-	{0xee, check_for_fire},
-	{0xcc, check_for_dummy},
-	{0x00, check_texture_addr0},
-	{0x01, check_texture_addr0},
-	{0x02, check_texture_addr0},
-	{0x03, check_texture_addr0},
-	{0x04, check_texture_addr0},
-	{0x05, check_texture_addr0},
-	{0x06, check_texture_addr0},
-	{0x07, check_texture_addr0},
-	{0x08, check_texture_addr0},
-	{0x09, check_texture_addr0},
-	{0x0A, check_texture_addr0},
-	{0x0B, check_texture_addr0},
-	{0x20, check_texture_addr1},
-	{0x21, check_texture_addr1},
-	{0x22, check_texture_addr1},
-	{0x23, check_texture_addr4},
-	{0x2B, check_texture_addr3},
-	{0x2C, check_texture_addr3},
-	{0x2D, check_texture_addr3},
-	{0x2E, check_texture_addr3},
-	{0x2F, check_texture_addr3},
-	{0x30, check_texture_addr3},
-	{0x31, check_texture_addr3},
-	{0x32, check_texture_addr3},
-	{0x33, check_texture_addr3},
-	{0x34, check_texture_addr3},
-	{0x35, check_texture_addr3},
-	{0x36, check_texture_addr3},
-	{0x4B, check_texture_addr5},
-	{0x4C, check_texture_addr6},
-	{0x51, check_texture_addr7},
-	{0x52, check_texture_addr8},
-	{0x77, check_texture_addr2},
-	{0x78, no_check},
-	{0x79, no_check},
-	{0x7A, no_check},
-	{0x7B, check_texture_addr_mode},
-	{0x7C, no_check},
-	{0x7D, no_check},
-	{0x7E, no_check},
-	{0x7F, no_check},
-	{0x80, no_check},
-	{0x81, no_check},
-	{0x82, no_check},
-	{0x83, no_check},
-	{0x85, no_check},
-	{0x86, no_check},
-	{0x87, no_check},
-	{0x88, no_check},
-	{0x89, no_check},
-	{0x8A, no_check},
-	{0x90, no_check},
-	{0x91, no_check},
-	{0x92, no_check},
-	{0x93, no_check}
-};
-
-/* Check for flexible vertex format */
-static struct hz_init init_table3[] = {
-	{0xf2, check_for_header2_err},
-	{0xf0, check_for_header1_err},
-	{0xcc, check_for_dummy},
-	{0x00, check_number_texunits},
-	{0x01, no_check},
-	{0x02, no_check},
-	{0x03, no_check}
-};
-
-static enum hazard table1[256];
-static enum hazard table2[256];
-static enum hazard table3[256];
-
-static inline int
-eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
-{
-	if ((buf_end - *buf) >= num_words) {
-		*buf += num_words;
-		return 0;
-	}
-	DRM_ERROR("Illegal termination of DMA command buffer\n");
-	return 1;
-}
-
-/*
- * Partially stolen from drm_memory.h
- *
-static inline drm_local_map_t *via_drm_lookup_agp_map(struct drm_via_state *seq,
-						    unsigned long offset,
-						    unsigned long size,
-						    struct drm_device *dev)
-{
-	struct drm_map_list *r_list;
-	drm_local_map_t *map = seq->map_cache;
-
-	if (map && map->offset <= offset
-	    && (offset + size) <= (map->offset + map->size)) {
-		return map;
-	}
-
-	list_for_each_entry(r_list, &dev->maplist, head) {
-		map = r_list->map;
-		if (!map)
-			continue;
-		if (map->offset <= offset
-		    && (offset + size) <= (map->offset + map->size)
-		    && !(map->flags & _DRM_RESTRICTED)
-		    && (map->type == _DRM_AGP)) {
-			seq->map_cache = map;
-			return map;
-		}
-	}
-	return NULL;
-}
-
-*
- * Require that all AGP texture levels reside in the same AGP map which should
- * be mappable by the client. This is not a big restriction.
- * FIXME: To actually enforce this security policy strictly, drm_rmmap
- * would have to wait for dma quiescent before removing an AGP map.
- * The via_drm_lookup_agp_map call in reality seems to take
- * very little CPU time.
- */
-
-static inline int finish_current_sequence(struct drm_via_state *cur_seq)
-{
-	switch (cur_seq->unfinished) {
-	case z_address:
-		DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
-		break;
-	case dest_address:
-		DRM_DEBUG("Destination start address is 0x%x\n",
-			  cur_seq->d_addr);
-		break;
-	case tex_address:
-		if (cur_seq->agp_texture) {
-			unsigned start =
-			    cur_seq->tex_level_lo[cur_seq->texture];
-			unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
-			unsigned long lo = ~0, hi = 0, tmp;
-			uint32_t *addr, *pitch, *height, tex;
-			unsigned i;
-			int npot;
-
-			if (end > 9)
-				end = 9;
-			if (start > 9)
-				start = 9;
-
-			addr =
-			    &(cur_seq->t_addr[tex = cur_seq->texture][start]);
-			pitch = &(cur_seq->pitch[tex][start]);
-			height = &(cur_seq->height[tex][start]);
-			npot = cur_seq->tex_npot[tex];
-			for (i = start; i <= end; ++i) {
-				tmp = *addr++;
-				if (tmp < lo)
-					lo = tmp;
-				if (i == 0 && npot)
-					tmp += (*height++ * *pitch++);
-				else
-					tmp += (*height++ << *pitch++);
-				if (tmp > hi)
-					hi = tmp;
-			}
-
-			/*if (!via_drm_lookup_agp_map
-			    (cur_seq, lo, hi - lo, cur_seq->dev)) {
-				DRM_ERROR
-				    ("AGP texture is not in allowed map\n");
-				return 2;
-			}*/
-		}
-		break;
-	default:
-		break;
-	}
-	cur_seq->unfinished = no_sequence;
-	return 0;
-}
-
-static inline int
-investigate_hazard(uint32_t cmd, enum hazard hz, struct drm_via_state *cur_seq)
-{
-	register uint32_t tmp, *tmp_addr;
-
-	if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
-		int ret;
-		if ((ret = finish_current_sequence(cur_seq)))
-			return ret;
-	}
-
-	switch (hz) {
-	case check_for_header2:
-		if (cmd == HALCYON_HEADER2)
-			return 1;
-		return 0;
-	case check_for_header1:
-		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
-			return 1;
-		return 0;
-	case check_for_header2_err:
-		if (cmd == HALCYON_HEADER2)
-			return 1;
-		DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
-		break;
-	case check_for_header1_err:
-		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
-			return 1;
-		DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
-		break;
-	case check_for_fire:
-		if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
-			return 1;
-		DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
-		break;
-	case check_for_dummy:
-		if (HC_DUMMY == cmd)
-			return 0;
-		DRM_ERROR("Illegal DMA HC_DUMMY command\n");
-		break;
-	case check_for_dd:
-		if (0xdddddddd == cmd)
-			return 0;
-		DRM_ERROR("Illegal DMA 0xdddddddd command\n");
-		break;
-	case check_z_buffer_addr0:
-		cur_seq->unfinished = z_address;
-		cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
-		    (cmd & 0x00FFFFFF);
-		return 0;
-	case check_z_buffer_addr1:
-		cur_seq->unfinished = z_address;
-		cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
-		    ((cmd & 0xFF) << 24);
-		return 0;
-	case check_z_buffer_addr_mode:
-		cur_seq->unfinished = z_address;
-		if ((cmd & 0x0000C000) == 0)
-			return 0;
-		DRM_ERROR("Attempt to place Z buffer in system memory\n");
-		return 2;
-	case check_destination_addr0:
-		cur_seq->unfinished = dest_address;
-		cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
-		    (cmd & 0x00FFFFFF);
-		return 0;
-	case check_destination_addr1:
-		cur_seq->unfinished = dest_address;
-		cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
-		    ((cmd & 0xFF) << 24);
-		return 0;
-	case check_destination_addr_mode:
-		cur_seq->unfinished = dest_address;
-		if ((cmd & 0x0000C000) == 0)
-			return 0;
-		DRM_ERROR
-		    ("Attempt to place 3D drawing buffer in system memory\n");
-		return 2;
-	case check_texture_addr0:
-		cur_seq->unfinished = tex_address;
-		tmp = (cmd >> 24);
-		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
-		*tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
-		return 0;
-	case check_texture_addr1:
-		cur_seq->unfinished = tex_address;
-		tmp = ((cmd >> 24) - 0x20);
-		tmp += tmp << 1;
-		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
-		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
-		tmp_addr++;
-		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
-		tmp_addr++;
-		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
-		return 0;
-	case check_texture_addr2:
-		cur_seq->unfinished = tex_address;
-		cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
-		cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
-		return 0;
-	case check_texture_addr3:
-		cur_seq->unfinished = tex_address;
-		tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
-		if (tmp == 0 &&
-		    (cmd & HC_HTXnEnPit_MASK)) {
-			cur_seq->pitch[cur_seq->texture][tmp] =
-				(cmd & HC_HTXnLnPit_MASK);
-			cur_seq->tex_npot[cur_seq->texture] = 1;
-		} else {
-			cur_seq->pitch[cur_seq->texture][tmp] =
-				(cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
-			cur_seq->tex_npot[cur_seq->texture] = 0;
-			if (cmd & 0x000FFFFF) {
-				DRM_ERROR
-					("Unimplemented texture level 0 pitch mode.\n");
-				return 2;
-			}
-		}
-		return 0;
-	case check_texture_addr4:
-		cur_seq->unfinished = tex_address;
-		tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
-		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
-		return 0;
-	case check_texture_addr5:
-	case check_texture_addr6:
-		cur_seq->unfinished = tex_address;
-		/*
-		 * Texture width. We don't care since we have the pitch.
-		 */
-		return 0;
-	case check_texture_addr7:
-		cur_seq->unfinished = tex_address;
-		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
-		tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
-		tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
-		tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
-		tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
-		tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
-		tmp_addr[0] = 1 << (cmd & 0x0000000F);
-		return 0;
-	case check_texture_addr8:
-		cur_seq->unfinished = tex_address;
-		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
-		tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
-		tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
-		tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
-		tmp_addr[6] = 1 << (cmd & 0x0000000F);
-		return 0;
-	case check_texture_addr_mode:
-		cur_seq->unfinished = tex_address;
-		if (2 == (tmp = cmd & 0x00000003)) {
-			DRM_ERROR
-			    ("Attempt to fetch texture from system memory.\n");
-			return 2;
-		}
-		cur_seq->agp_texture = (tmp == 3);
-		cur_seq->tex_palette_size[cur_seq->texture] =
-		    (cmd >> 16) & 0x000000007;
-		return 0;
-	case check_for_vertex_count:
-		cur_seq->vertex_count = cmd & 0x0000FFFF;
-		return 0;
-	case check_number_texunits:
-		cur_seq->multitex = (cmd >> 3) & 1;
-		return 0;
-	default:
-		DRM_ERROR("Illegal DMA data: 0x%08x\n", cmd);
-		return 2;
-	}
-	return 2;
-}
-
-static inline int
-via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
-		    struct drm_via_state *cur_seq)
-{
-	struct via_device *dev_priv =
-	    (struct via_device *) cur_seq->dev->dev_private;
-	uint32_t a_fire, bcmd, dw_count;
-	int ret = 0;
-	int have_fire;
-	const uint32_t *buf = *buffer;
-
-	while (buf < buf_end) {
-		have_fire = 0;
-		if ((buf_end - buf) < 2) {
-			DRM_ERROR
-			    ("Unexpected termination of primitive list.\n");
-			ret = 1;
-			break;
-		}
-		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
-			break;
-		bcmd = *buf++;
-		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
-			DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
-				  *buf);
-			ret = 1;
-			break;
-		}
-		a_fire =
-		    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
-		    HC_HE3Fire_MASK;
-
-		/*
-		 * How many dwords per vertex?
-		 */
-
-		if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
-			DRM_ERROR("Illegal B command vertex data for AGP.\n");
-			ret = 1;
-			break;
-		}
-
-		dw_count = 0;
-		if (bcmd & (1 << 7))
-			dw_count += (cur_seq->multitex) ? 2 : 1;
-		if (bcmd & (1 << 8))
-			dw_count += (cur_seq->multitex) ? 2 : 1;
-		if (bcmd & (1 << 9))
-			dw_count++;
-		if (bcmd & (1 << 10))
-			dw_count++;
-		if (bcmd & (1 << 11))
-			dw_count++;
-		if (bcmd & (1 << 12))
-			dw_count++;
-		if (bcmd & (1 << 13))
-			dw_count++;
-		if (bcmd & (1 << 14))
-			dw_count++;
-
-		while (buf < buf_end) {
-			if (*buf == a_fire) {
-				if (dev_priv->num_fire_offsets >=
-				    VIA_FIRE_BUF_SIZE) {
-					DRM_ERROR("Fire offset buffer full.\n");
-					ret = 1;
-					break;
-				}
-				dev_priv->fire_offsets[dev_priv->
-						       num_fire_offsets++] =
-				    buf;
-				have_fire = 1;
-				buf++;
-				if (buf < buf_end && *buf == a_fire)
-					buf++;
-				break;
-			}
-			if ((*buf == HALCYON_HEADER2) ||
-			    ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
-				DRM_ERROR("Missing Vertex Fire command, "
-					  "Stray Vertex Fire command  or verifier "
-					  "lost sync.\n");
-				ret = 1;
-				break;
-			}
-			if ((ret = eat_words(&buf, buf_end, dw_count)))
-				break;
-		}
-		if (buf >= buf_end && !have_fire) {
-			DRM_ERROR("Missing Vertex Fire command or verifier "
-				  "lost sync.\n");
-			ret = 1;
-			break;
-		}
-		if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
-			DRM_ERROR("AGP Primitive list end misaligned.\n");
-			ret = 1;
-			break;
-		}
-	}
-	*buffer = buf;
-	return ret;
-}
-
-static inline enum verifier_state
-via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
-		  struct drm_via_state *hc_state)
-{
-	uint32_t cmd;
-	int hz_mode;
-	enum hazard hz;
-	const uint32_t *buf = *buffer;
-	const enum hazard *hz_table;
-
-	if ((buf_end - buf) < 2) {
-		DRM_ERROR
-		    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
-		return state_error;
-	}
-	buf++;
-	cmd = (*buf++ & 0xFFFF0000) >> 16;
-
-	switch (cmd) {
-	case HC_ParaType_CmdVdata:
-		if (via_check_prim_list(&buf, buf_end, hc_state))
-			return state_error;
-		*buffer = buf;
-		return state_command;
-	case HC_ParaType_NotTex:
-		hz_table = table1;
-		break;
-	case HC_ParaType_Tex:
-		hc_state->texture = 0;
-		hz_table = table2;
-		break;
-	case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
-		hc_state->texture = 1;
-		hz_table = table2;
-		break;
-	case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
-		hz_table = table3;
-		break;
-	case HC_ParaType_Auto:
-		if (eat_words(&buf, buf_end, 2))
-			return state_error;
-		*buffer = buf;
-		return state_command;
-	case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
-		if (eat_words(&buf, buf_end, 32))
-			return state_error;
-		*buffer = buf;
-		return state_command;
-	case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
-	case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
-		DRM_ERROR("Texture palettes are rejected because of "
-			  "lack of info how to determine their size.\n");
-		return state_error;
-	case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
-		DRM_ERROR("Fog factor palettes are rejected because of "
-			  "lack of info how to determine their size.\n");
-		return state_error;
-	default:
-
-		/*
-		 * There are some unimplemented HC_ParaTypes here, that
-		 * need to be implemented if the Mesa driver is extended.
-		 */
-
-		DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
-			  "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
-			  cmd, *(buf - 2));
-		*buffer = buf;
-		return state_error;
-	}
-
-	while (buf < buf_end) {
-		cmd = *buf++;
-		if ((hz = hz_table[cmd >> 24])) {
-			if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
-				if (hz_mode == 1) {
-					buf--;
-					break;
-				}
-				return state_error;
-			}
-		} else if (hc_state->unfinished &&
-			   finish_current_sequence(hc_state)) {
-			return state_error;
-		}
-	}
-	if (hc_state->unfinished && finish_current_sequence(hc_state))
-		return state_error;
-	*buffer = buf;
-	return state_command;
-}
-
-static inline enum verifier_state
-via_parse_header2(struct via_device *dev_priv, uint32_t const **buffer,
-		  const uint32_t *buf_end, int *fire_count)
-{
-	uint32_t cmd;
-	const uint32_t *buf = *buffer;
-	const uint32_t *next_fire;
-	int burst = 0;
-
-	next_fire = dev_priv->fire_offsets[*fire_count];
-	buf++;
-	cmd = (*buf & 0xFFFF0000) >> 16;
-	VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
-	switch (cmd) {
-	case HC_ParaType_CmdVdata:
-		while ((buf < buf_end) &&
-		       (*fire_count < dev_priv->num_fire_offsets) &&
-		       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
-			while (buf <= next_fire) {
-				VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
-					  (burst & 63), *buf++);
-				burst += 4;
-			}
-			if ((buf < buf_end)
-			    && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
-				buf++;
-
-			if (++(*fire_count) < dev_priv->num_fire_offsets)
-				next_fire = dev_priv->fire_offsets[*fire_count];
-		}
-		break;
-	default:
-		while (buf < buf_end) {
-
-			if (*buf == HC_HEADER2 ||
-			    (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
-			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
-			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
-				break;
-
-			VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
-				  (burst & 63), *buf++);
-			burst += 4;
-		}
-	}
-	*buffer = buf;
-	return state_command;
-}
-
-static inline int verify_mmio_address(uint32_t address)
-{
-	if ((address > 0x3FF) && (address < 0xC00)) {
-		DRM_ERROR("Invalid VIDEO DMA command. "
-			  "Attempt to access 3D- or command burst area.\n");
-		return 1;
-	} else if ((address > 0xDFF) && (address < 0x1200)) {
-		DRM_ERROR("Invalid VIDEO DMA command. "
-			  "Attempt to access PCI DMA area.\n");
-		return 1;
-	} else if (((address > 0x13FF) && (address < 0x2200)) ||
-		(address > 0x33ff)) {
-		DRM_ERROR("Invalid VIDEO DMA command. "
-			  "Attempt to access VGA registers.\n");
-		return 1;
-	}
-	return 0;
-}
-
-static inline int is_dummy_cmd(uint32_t cmd)
-{
-	if ((cmd & INV_DUMMY_MASK) == 0xCC000000 ||
-	    (cmd & INV_DUMMY_MASK) == 0xCD000000 ||
-	    (cmd & INV_DUMMY_MASK) == 0xCE000000 ||
-	    (cmd & INV_DUMMY_MASK) == 0xCF000000 ||
-	    (cmd & INV_DUMMY_MASK) == 0xDD000000)
-		return 1;
-	return 0;
-}
-
-static inline int
-verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
-		  uint32_t dwords)
-{
-	const uint32_t *buf = *buffer;
-
-	if (buf_end - buf < dwords) {
-		DRM_ERROR("Illegal termination of video command.\n");
-		return 1;
-	}
-	while (dwords--) {
-		if (*buf && !is_dummy_cmd(*buf)) {
-			DRM_ERROR("Illegal video command tail.\n");
-			return 1;
-		}
-		buf++;
-	}
-	*buffer = buf;
-	return 0;
-}
-
-static inline enum verifier_state
-via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
-{
-	uint32_t cmd;
-	const uint32_t *buf = *buffer;
-	enum verifier_state ret = state_command;
-
-	while (buf < buf_end) {
-		cmd = *buf;
-		if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
-		    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
-			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
-				break;
-			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
-				  "Attempt to access 3D- or command burst area.\n");
-			ret = state_error;
-			break;
-		} else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
-			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
-				break;
-			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
-				  "Attempt to access VGA registers.\n");
-			ret = state_error;
-			break;
-		} else {
-			buf += 2;
-		}
-	}
-	*buffer = buf;
-	return ret;
-}
-
-static inline enum verifier_state
-via_parse_header1(struct via_device *dev_priv, uint32_t const **buffer,
-		  const uint32_t *buf_end)
-{
-	register uint32_t cmd = VIA_REG_GECMD;
-	const uint32_t *buf = *buffer;
-
-	while (buf < buf_end) {
-
-		/*
-		 * Wait idle to avoid lengthy PCI stalls.
-		 * There is no on-chip queue for these MMIO commands, so
-		 * without this idle wait, the chip will simply
-		 * stall the PCI bus until the engines are idle.
-		 */
-		if (unlikely(cmd == VIA_REG_GECMD))
-			via_wait_idle(dev_priv);
-
-		cmd = *buf;
-		if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
-			break;
-		cmd = (cmd & ~HALCYON_HEADER1MASK) << 2;
-		VIA_WRITE(cmd, *++buf);
-		buf++;
-	}
-	*buffer = buf;
-	return state_command;
-}
-
-static inline enum verifier_state
-via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
-{
-	uint32_t data;
-	const uint32_t *buf = *buffer;
-
-	if (buf_end - buf < 4) {
-		DRM_ERROR("Illegal termination of video header5 command\n");
-		return state_error;
-	}
-
-	data = *buf++ & ~VIA_VIDEOMASK;
-	if (verify_mmio_address(data))
-		return state_error;
-
-	data = *buf++;
-	if (*buf++ != 0x00F50000) {
-		DRM_ERROR("Illegal header5 header data\n");
-		return state_error;
-	}
-	if (*buf++ != 0x00000000) {
-		DRM_ERROR("Illegal header5 header data\n");
-		return state_error;
-	}
-	if (eat_words(&buf, buf_end, data))
-		return state_error;
-	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
-		return state_error;
-	*buffer = buf;
-	return state_command;
-
-}
-
-static inline enum verifier_state
-via_parse_vheader5(struct via_device *dev_priv, uint32_t const **buffer,
-		   const uint32_t *buf_end)
-{
-	uint32_t addr, count, i;
-	const uint32_t *buf = *buffer;
-
-	addr = *buf++ & ~VIA_VIDEOMASK;
-	i = count = *buf;
-	buf += 3;
-	while (i--)
-		VIA_WRITE(addr, *buf++);
-	if (count & 3)
-		buf += 4 - (count & 3);
-	*buffer = buf;
-	return state_command;
-}
-
-static inline enum verifier_state
-via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
-{
-	uint32_t data;
-	const uint32_t *buf = *buffer;
-	uint32_t i;
-
-	if (buf_end - buf < 4) {
-		DRM_ERROR("Illegal termination of video header6 command\n");
-		return state_error;
-	}
-	buf++;
-	data = *buf++;
-	if (*buf++ != 0x00F60000) {
-		DRM_ERROR("Illegal header6 header data\n");
-		return state_error;
-	}
-	if (*buf++ != 0x00000000) {
-		DRM_ERROR("Illegal header6 header data\n");
-		return state_error;
-	}
-	if ((buf_end - buf) < (data << 1)) {
-		DRM_ERROR("Illegal termination of video header6 command\n");
-		return state_error;
-	}
-	for (i = 0; i < data; ++i) {
-		if (verify_mmio_address(*buf++))
-			return state_error;
-		buf++;
-	}
-	data <<= 1;
-	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
-		return state_error;
-	*buffer = buf;
-	return state_command;
-}
-
-static inline enum verifier_state
-via_parse_vheader6(struct via_device *dev_priv, uint32_t const **buffer,
-		   const uint32_t *buf_end)
-{
-
-	uint32_t addr, count, i;
-	const uint32_t *buf = *buffer;
-
-	i = count = *++buf;
-	buf += 3;
-	while (i--) {
-		addr = *buf++;
-		VIA_WRITE(addr, *buf++);
-	}
-	count <<= 1;
-	if (count & 3)
-		buf += 4 - (count & 3);
-	*buffer = buf;
-	return state_command;
-}
-
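
Taken together, the check and parse routines for the two video headers pin down their packet layouts: a HEADER5 packet streams "count" data dwords into the single MMIO port named in its first dword, while a HEADER6 packet carries "count" explicit (address, value) pairs; both carry a fixed magic dword (0x00F50000 or 0x00F60000) plus a zero dword in their headers and are padded to a four-dword boundary. The structs below are purely illustrative, since the removed driver only ever handled these packets as raw dword streams.

    #include <stdint.h>

    /* VIDEO HEADER5: "count" data dwords, all written to one MMIO port. */
    struct vheader5_packet {
            uint32_t port;     /* VIA_VIDEO_HEADER5 marker | MMIO offset     */
            uint32_t count;    /* number of data dwords that follow          */
            uint32_t magic;    /* must be 0x00F50000                         */
            uint32_t zero;     /* must be 0x00000000                         */
            uint32_t data[];   /* count dwords, padded to a 4-dword multiple */
    };

    /* VIDEO HEADER6: "count" (address, value) pairs. */
    struct vheader6_packet {
            uint32_t header;   /* VIA_VIDEO_HEADER6 marker                   */
            uint32_t count;    /* number of (address, value) pairs           */
            uint32_t magic;    /* must be 0x00F60000                         */
            uint32_t zero;     /* must be 0x00000000                         */
            uint32_t pairs[];  /* 2 * count dwords, padded to a 4-dword multiple */
    };
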
-int
-via_verify_command_stream(const uint32_t * buf, unsigned int size,
-			  struct drm_device * dev, int agp)
-{
-
-	struct via_device *dev_priv = dev->dev_private;
-	struct drm_via_state *hc_state = &dev_priv->hc_state;
-	struct drm_via_state saved_state = *hc_state;
-	uint32_t cmd;
-	const uint32_t *buf_end = buf + (size >> 2);
-	enum verifier_state state = state_command;
-	int cme_video;
-	int supported_3d;
-
-	cme_video = (dev_priv->engine_type != VIA_ENG_H1);
-	supported_3d = (dev_priv->engine_type < VIA_ENG_H5S1);
-
-	hc_state->dev = dev;
-	hc_state->unfinished = no_sequence;
-	hc_state->agp = agp;
-	hc_state->buf_start = buf;
-	dev_priv->num_fire_offsets = 0;
-
-	while (buf < buf_end) {
-
-		switch (state) {
-		case state_header2:
-			state = via_check_header2(&buf, buf_end, hc_state);
-			break;
-		case state_header1:
-			state = via_check_header1(&buf, buf_end);
-			break;
-		case state_vheader5:
-			state = via_check_vheader5(&buf, buf_end);
-			break;
-		case state_vheader6:
-			state = via_check_vheader6(&buf, buf_end);
-			break;
-		case state_command:
-			if ((HALCYON_HEADER2 == (cmd = *buf)) &&
-			    supported_3d)
-				state = state_header2;
-			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
-				state = state_header1;
-			else if (cme_video
-				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
-				state = state_vheader5;
-			else if (cme_video
-				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
-				state = state_vheader6;
-			else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
-				DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
-				state = state_error;
-			} else {
-				DRM_ERROR
-				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
-				     cmd);
-				state = state_error;
-			}
-			break;
-		case state_error:
-		default:
-			*hc_state = saved_state;
-			return -EINVAL;
-		}
-	}
-	if (state == state_error) {
-		*hc_state = saved_state;
-		return -EINVAL;
-	}
-	return 0;
-}
-
-int
-via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
-			 unsigned int size)
-{
-
-	struct via_device *dev_priv = dev->dev_private;
-	const uint32_t *buf_end = buf + (size >> 2);
-	enum verifier_state state = state_command;
-	int fire_count = 0;
-	uint32_t cmd;
-
-	while (buf < buf_end) {
-
-		switch (state) {
-		case state_header2:
-			state =
-			    via_parse_header2(dev_priv, &buf, buf_end,
-					      &fire_count);
-			break;
-		case state_header1:
-			state = via_parse_header1(dev_priv, &buf, buf_end);
-			break;
-		case state_vheader5:
-			state = via_parse_vheader5(dev_priv, &buf, buf_end);
-			break;
-		case state_vheader6:
-			state = via_parse_vheader6(dev_priv, &buf, buf_end);
-			break;
-		case state_command:
-			if (HALCYON_HEADER2 == (cmd = *buf))
-				state = state_header2;
-			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
-				state = state_header1;
-			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
-				state = state_vheader5;
-			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
-				state = state_vheader6;
-			else {
-				DRM_ERROR
-				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
-				     cmd);
-				state = state_error;
-			}
-			break;
-		case state_error:
-		default:
-			return -EINVAL;
-		}
-	}
-	if (state == state_error)
-		return -EINVAL;
-	return 0;
-}
-
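
via_verify_command_stream() and via_parse_command_stream() are the two halves of the same state machine: the first only classifies each packet and bounds-checks it, rolling the hazard state back and returning -EINVAL on any violation, while the second replays an already-verified stream to the hardware with VIA_WRITE(). A submission path would therefore run them back to back; the sketch below is only an illustration of that ordering, and submit_user_stream() is not a function from the removed driver.

    /* Hypothetical call site, for illustration only. */
    static int submit_user_stream(struct drm_device *dev, const uint32_t *buf,
                                  unsigned int size, int agp)
    {
            int ret;

            /* Reject malformed streams and forbidden register accesses. */
            ret = via_verify_command_stream(buf, size, dev, agp);
            if (ret)
                    return ret;

            /* Only a verified stream ever reaches the hardware. */
            return via_parse_command_stream(dev, buf, size);
    }
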
-static void
-setup_hazard_table(struct hz_init init_table[], enum hazard table[], int size)
-{
-	int i;
-
-	for (i = 0; i < 256; ++i)
-		table[i] = forbidden_command;
-
-	for (i = 0; i < size; ++i)
-		table[init_table[i].code] = init_table[i].hz;
-}
-
-void via_init_command_verifier(void)
-{
-	setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
-	setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
-	setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
-}
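
The hazard tables built by via_init_command_verifier() are plain 256-entry lookup tables indexed by an 8-bit command code: every slot starts out as forbidden_command, and only the codes listed in the per-engine init tables are re-marked with the specific check to run. A minimal standalone sketch of that pattern follows; the hazard names other than forbidden_command and the example codes are invented for illustration and are not the removed driver's actual tables.

    /* Illustrative subset -- the removed verifier defined many more hazards. */
    enum hazard {
            forbidden_command = 0,
            no_check,
            check_dest_addr,
            check_z_addr,
    };

    struct hz_init {
            unsigned int code;    /* 8-bit command code                  */
            enum hazard hz;       /* check to run when that code is seen */
    };

    /* Example init table; the codes are invented for this sketch. */
    static const struct hz_init example_init[] = {
            { 0x10, no_check },
            { 0x20, check_dest_addr },
            { 0x22, check_z_addr },
    };

    static enum hazard example_table[256];

    static void example_setup_hazard_table(void)
    {
            unsigned int i;

            /* Everything is forbidden unless an init entry says otherwise. */
            for (i = 0; i < 256; i++)
                    example_table[i] = forbidden_command;
            for (i = 0; i < sizeof(example_init) / sizeof(example_init[0]); i++)
                    example_table[example_init[i].code] = example_init[i].hz;
    }
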
diff --git a/drivers/gpu/drm/openchrome/openchrome_verifier.h b/drivers/gpu/drm/openchrome/openchrome_verifier.h
deleted file mode 100644
index bb25f0ad47ec..000000000000
--- a/drivers/gpu/drm/openchrome/openchrome_verifier.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2004 The Unichrome Project. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellström 2004.
- */
-
-#ifndef _VIA_VERIFIER_H_
-#define _VIA_VERIFIER_H_
-
-#include "openchrome_3d_reg.h"
-
-enum drm_via_sequence {
-	no_sequence = 0,
-	z_address,
-	dest_address,
-	tex_address
-};
-
-struct drm_via_state {
-	unsigned texture;
-	uint32_t z_addr;
-	uint32_t d_addr;
-	uint32_t t_addr[2][10];
-	uint32_t pitch[2][10];
-	uint32_t height[2][10];
-	uint32_t tex_level_lo[2];
-	uint32_t tex_level_hi[2];
-	uint32_t tex_palette_size[2];
-	uint32_t tex_npot[2];
-	enum drm_via_sequence unfinished;
-	int agp_texture;
-	int multitex;
-	struct drm_device *dev;
-	uint32_t vertex_count;
-	int agp;
-	const uint32_t *buf_start;
-};
-
-extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
-				     struct drm_device *dev, int agp);
-extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
-				    unsigned int size);
-#endif


More information about the openchrome-devel mailing list