[Openchrome-devel] drm-openchrome: Branch 'drm-next-3.19' - 4 commits - drivers/gpu/drm
Kevin Brace
kevinbrace at kemper.freedesktop.org
Tue May 1 04:00:32 UTC 2018
drivers/gpu/drm/openchrome/via_crtc.c | 28 -
drivers/gpu/drm/openchrome/via_drv.h | 4
drivers/gpu/drm/openchrome/via_ttm.c | 681 ++++++++++++++++++----------------
3 files changed, 377 insertions(+), 336 deletions(-)
New commits:
commit 62d029c6db50b09a15a479b4c472621acb73b8a7
Author: Kevin Brace <kevinbrace at gmx.com>
Date: Mon Apr 30 20:53:31 2018 -0700
drm/openchrome: Version bumped to 3.0.81
VX855 / VX875 chipset (Chrome9 HCM) hardware cursor fix and
miscellaneous cleanups.
Signed-off-by: Kevin Brace <kevinbrace at gmx.com>
diff --git a/drivers/gpu/drm/openchrome/via_drv.h b/drivers/gpu/drm/openchrome/via_drv.h
index 73071fcc22c9..4709b5b683a4 100644
--- a/drivers/gpu/drm/openchrome/via_drv.h
+++ b/drivers/gpu/drm/openchrome/via_drv.h
@@ -30,11 +30,11 @@
#define DRIVER_AUTHOR "OpenChrome Project"
#define DRIVER_NAME "openchrome"
#define DRIVER_DESC "OpenChrome DRM for VIA Technologies Chrome IGP"
-#define DRIVER_DATE "20180329"
+#define DRIVER_DATE "20180430"
#define DRIVER_MAJOR 3
#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 80
+#define DRIVER_PATCHLEVEL 81
#include <linux/module.h>
commit b10340a9845f8f537bd94685a34ebb31706c70bc
Author: Kevin Brace <kevinbrace at gmx.com>
Date: Mon Apr 30 20:20:51 2018 -0700
drm/openchrome: Remove entered / exited messages for hardware cursor
The entered / exited messages for the hardware cursor cause too much
kern.log message pollution.
Signed-off-by: Kevin Brace <kevinbrace at gmx.com>
diff --git a/drivers/gpu/drm/openchrome/via_crtc.c b/drivers/gpu/drm/openchrome/via_crtc.c
index c5a3ddee6e0f..6434a6c91b3c 100644
--- a/drivers/gpu/drm/openchrome/via_crtc.c
+++ b/drivers/gpu/drm/openchrome/via_crtc.c
@@ -156,8 +156,6 @@ static void via_hide_cursor(struct drm_crtc *crtc)
struct via_device *dev_priv = crtc->dev->dev_private;
uint32_t temp;
- DRM_DEBUG_KMS("Entered %s.\n", __func__);
-
switch (dev->pdev->device) {
case PCI_DEVICE_ID_VIA_PM800:
case PCI_DEVICE_ID_VIA_VT3157:
@@ -180,8 +178,6 @@ static void via_hide_cursor(struct drm_crtc *crtc)
VIA_WRITE(HI_CONTROL, temp & 0xFFFFFFFA);
break;
}
-
- DRM_DEBUG_KMS("Exiting %s.\n", __func__);
}
static void via_show_cursor(struct drm_crtc *crtc)
@@ -190,8 +186,6 @@ static void via_show_cursor(struct drm_crtc *crtc)
struct via_crtc *iga = container_of(crtc, struct via_crtc, base);
struct via_device *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG_KMS("Entered %s.\n", __func__);
-
switch (dev->pdev->device) {
case PCI_DEVICE_ID_VIA_PM800:
case PCI_DEVICE_ID_VIA_VT3157:
@@ -248,8 +242,6 @@ static void via_show_cursor(struct drm_crtc *crtc)
break;
}
-
- DRM_DEBUG_KMS("Exiting %s.\n", __func__);
}
static void via_cursor_address(struct drm_crtc *crtc)
@@ -258,10 +250,8 @@ static void via_cursor_address(struct drm_crtc *crtc)
struct via_crtc *iga = container_of(crtc, struct via_crtc, base);
struct via_device *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG_KMS("Entered %s.\n", __func__);
-
if (!iga->cursor_kmap.bo) {
- goto exit;
+ return;
}
switch (dev->pdev->device) {
@@ -283,9 +273,6 @@ static void via_cursor_address(struct drm_crtc *crtc)
VIA_WRITE(HI_FBOFFSET, iga->cursor_kmap.bo->offset);
break;
}
-
-exit:
- DRM_DEBUG_KMS("Exiting %s.\n", __func__);
}
static int via_crtc_cursor_set(struct drm_crtc *crtc,
@@ -300,8 +287,6 @@ static int via_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_gem_object *obj = NULL;
struct ttm_bo_kmap_obj user_kmap;
- DRM_DEBUG_KMS("Entered %s.\n", __func__);
-
if (!iga->cursor_kmap.bo)
return -ENXIO;
@@ -350,7 +335,6 @@ static int via_crtc_cursor_set(struct drm_crtc *crtc,
via_cursor_address(crtc);
via_show_cursor(crtc);
- DRM_DEBUG_KMS("Exiting %s.\n", __func__);
return ret;
}
commit 9dddceab145375a19d0c3b92bbb6c6e8e3e29124
Author: Kevin Brace <kevinbrace at gmx.com>
Date: Mon Apr 30 20:11:11 2018 -0700
drm/openchrome: Fix for VX855 / VX875 chipset hardware cursor display
Due to confusing PCI device ID labels for VX855 / VX875 chipset
graphics, the hardware cursor was not being displayed, and this
affected only the VX855 / VX875 chipset. The code was tested on a
Wyse Cx0 thin client.
Signed-off-by: Kevin Brace <kevinbrace at gmx.com>
diff --git a/drivers/gpu/drm/openchrome/via_crtc.c b/drivers/gpu/drm/openchrome/via_crtc.c
index ea559b04468a..c5a3ddee6e0f 100644
--- a/drivers/gpu/drm/openchrome/via_crtc.c
+++ b/drivers/gpu/drm/openchrome/via_crtc.c
@@ -164,7 +164,7 @@ static void via_hide_cursor(struct drm_crtc *crtc)
case PCI_DEVICE_ID_VIA_VT3343:
case PCI_DEVICE_ID_VIA_P4M900:
case PCI_DEVICE_ID_VIA_VT1122:
- case PCI_DEVICE_ID_VIA_VX855:
+ case PCI_DEVICE_ID_VIA_VX875:
case PCI_DEVICE_ID_VIA_VX900_VGA:
if (iga->index) {
temp = VIA_READ(HI_CONTROL);
@@ -198,7 +198,7 @@ static void via_show_cursor(struct drm_crtc *crtc)
case PCI_DEVICE_ID_VIA_VT3343:
case PCI_DEVICE_ID_VIA_P4M900:
case PCI_DEVICE_ID_VIA_VT1122:
- case PCI_DEVICE_ID_VIA_VX855:
+ case PCI_DEVICE_ID_VIA_VX875:
case PCI_DEVICE_ID_VIA_VX900_VGA:
/* Program Hardware Icon (HI) FIFO, foreground, and
* background colors. */
@@ -229,7 +229,7 @@ static void via_show_cursor(struct drm_crtc *crtc)
case PCI_DEVICE_ID_VIA_VT3343:
case PCI_DEVICE_ID_VIA_P4M900:
case PCI_DEVICE_ID_VIA_VT1122:
- case PCI_DEVICE_ID_VIA_VX855:
+ case PCI_DEVICE_ID_VIA_VX875:
case PCI_DEVICE_ID_VIA_VX900_VGA:
/* Turn on Hardware icon Cursor */
if (iga->index) {
@@ -270,7 +270,7 @@ static void via_cursor_address(struct drm_crtc *crtc)
case PCI_DEVICE_ID_VIA_VT3343:
case PCI_DEVICE_ID_VIA_P4M900:
case PCI_DEVICE_ID_VIA_VT1122:
- case PCI_DEVICE_ID_VIA_VX855:
+ case PCI_DEVICE_ID_VIA_VX875:
case PCI_DEVICE_ID_VIA_VX900_VGA:
/* Program the HI offset. */
if (iga->index) {
@@ -378,7 +378,7 @@ static int via_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
case PCI_DEVICE_ID_VIA_VT3343:
case PCI_DEVICE_ID_VIA_P4M900:
case PCI_DEVICE_ID_VIA_VT1122:
- case PCI_DEVICE_ID_VIA_VX855:
+ case PCI_DEVICE_ID_VIA_VX875:
case PCI_DEVICE_ID_VIA_VX900_VGA:
if (iga->index) {
VIA_WRITE(HI_POSSTART, ((xpos << 16) | (ypos & 0x07ff)));
commit 71f807e35d2e94a0e4f4fafaa780828d6d98b0a7
Author: Kevin Brace <kevinbrace at gmx.com>
Date: Thu Apr 26 19:05:23 2018 -0700
drm/openchrome: Indentation fix for via_ttm.c
Signed-off-by: Kevin Brace <kevinbrace at gmx.com>
diff --git a/drivers/gpu/drm/openchrome/via_ttm.c b/drivers/gpu/drm/openchrome/via_ttm.c
index 5f498a85b54c..16113e512a5d 100644
--- a/drivers/gpu/drm/openchrome/via_ttm.c
+++ b/drivers/gpu/drm/openchrome/via_ttm.c
@@ -20,6 +20,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+
#include <linux/dma-mapping.h>
#ifdef CONFIG_SWIOTLB
#include <linux/swiotlb.h>
@@ -29,85 +30,82 @@
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-static int
-via_ttm_mem_global_init(struct drm_global_reference *ref)
+static int via_ttm_mem_global_init(struct drm_global_reference *ref)
{
- return ttm_mem_global_init(ref->object);
+ return ttm_mem_global_init(ref->object);
}
-static void
-via_ttm_mem_global_release(struct drm_global_reference *ref)
+static void via_ttm_mem_global_release(struct drm_global_reference *ref)
{
- ttm_mem_global_release(ref->object);
+ ttm_mem_global_release(ref->object);
}
-static int
-via_ttm_global_init(struct via_device *dev_priv)
+static int via_ttm_global_init(struct via_device *dev_priv)
{
- struct drm_global_reference *global_ref;
- struct drm_global_reference *bo_ref;
- int rc;
-
- global_ref = &dev_priv->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &via_ttm_mem_global_init;
- global_ref->release = &via_ttm_mem_global_release;
-
- rc = drm_global_item_ref(global_ref);
- if (unlikely(rc != 0)) {
- DRM_ERROR("Failed setting up TTM memory accounting\n");
- global_ref->release = NULL;
- return rc;
- }
-
- dev_priv->ttm.bo_global_ref.mem_glob = dev_priv->ttm.mem_global_ref.object;
- bo_ref = &dev_priv->ttm.bo_global_ref.ref;
- bo_ref->global_type = DRM_GLOBAL_TTM_BO;
- bo_ref->size = sizeof(struct ttm_bo_global);
- bo_ref->init = &ttm_bo_global_init;
- bo_ref->release = &ttm_bo_global_release;
-
- rc = drm_global_item_ref(bo_ref);
- if (unlikely(rc != 0)) {
- DRM_ERROR("Failed setting up TTM BO subsystem\n");
- drm_global_item_unref(global_ref);
- global_ref->release = NULL;
- return rc;
- }
-
- return rc;
+ struct drm_global_reference *global_ref;
+ struct drm_global_reference *bo_ref;
+ int rc;
+
+ global_ref = &dev_priv->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &via_ttm_mem_global_init;
+ global_ref->release = &via_ttm_mem_global_release;
+
+ rc = drm_global_item_ref(global_ref);
+ if (unlikely(rc != 0)) {
+ DRM_ERROR("Failed setting up TTM memory accounting\n");
+ global_ref->release = NULL;
+ return rc;
+ }
+
+ dev_priv->ttm.bo_global_ref.mem_glob =
+ dev_priv->ttm.mem_global_ref.object;
+ bo_ref = &dev_priv->ttm.bo_global_ref.ref;
+ bo_ref->global_type = DRM_GLOBAL_TTM_BO;
+ bo_ref->size = sizeof(struct ttm_bo_global);
+ bo_ref->init = &ttm_bo_global_init;
+ bo_ref->release = &ttm_bo_global_release;
+
+ rc = drm_global_item_ref(bo_ref);
+ if (unlikely(rc != 0)) {
+ DRM_ERROR("Failed setting up TTM BO subsystem\n");
+ drm_global_item_unref(global_ref);
+ global_ref->release = NULL;
+ return rc;
+ }
+
+ return rc;
}
-static void
-via_ttm_global_release(struct drm_global_reference *global_ref,
- struct ttm_bo_global_ref *global_bo,
- struct ttm_bo_device *bdev)
+static void via_ttm_global_release(struct drm_global_reference *global_ref,
+ struct ttm_bo_global_ref *global_bo,
+ struct ttm_bo_device *bdev)
{
- DRM_DEBUG_KMS("Entered %s.\n", __func__);
+ DRM_DEBUG_KMS("Entered %s.\n", __func__);
- if (global_ref->release == NULL)
- return;
+ if (global_ref->release == NULL)
+ return;
- drm_global_item_unref(&global_bo->ref);
- drm_global_item_unref(global_ref);
- global_ref->release = NULL;
+ drm_global_item_unref(&global_bo->ref);
+ drm_global_item_unref(global_ref);
+ global_ref->release = NULL;
- DRM_DEBUG_KMS("Exiting %s.\n", __func__);
+ DRM_DEBUG_KMS("Exiting %s.\n", __func__);
}
-static void
-via_ttm_bo_destroy(struct ttm_buffer_object *bo)
+static void via_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
- struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
+ struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
- kfree(heap);
- heap = NULL;
+ kfree(heap);
+ heap = NULL;
}
-static struct ttm_tt *
-via_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
- uint32_t page_flags, struct page *dummy_read_page)
+static struct ttm_tt* via_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct via_device *dev_priv = container_of(bdev,
struct via_device, ttm.bdev);
@@ -125,8 +123,7 @@ via_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
}
-static int
-via_ttm_tt_populate(struct ttm_tt *ttm)
+static int via_ttm_tt_populate(struct ttm_tt *ttm)
{
struct sgdma_tt *dma_tt = (struct sgdma_tt *) ttm;
struct ttm_dma_tt *sgdma = &dma_tt->sgdma;
@@ -154,13 +151,17 @@ via_ttm_tt_populate(struct ttm_tt *ttm)
return ret;
for (i = 0; i < ttm->num_pages; i++) {
- sgdma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, sgdma->dma_address[i])) {
+ sgdma->dma_address[i] = pci_map_page(dev->pdev,
+ ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev,
+ sgdma->dma_address[i])) {
while (--i) {
- pci_unmap_page(dev->pdev, sgdma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_page(dev->pdev,
+ sgdma->dma_address[i],
+ PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
sgdma->dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
@@ -170,8 +171,7 @@ via_ttm_tt_populate(struct ttm_tt *ttm)
return ret;
}
-static void
-via_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void via_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct sgdma_tt *dma_tt = (struct sgdma_tt *) ttm;
struct ttm_dma_tt *sgdma = &dma_tt->sgdma;
@@ -197,15 +197,16 @@ via_ttm_tt_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (sgdma->dma_address[i]) {
pci_unmap_page(dev->pdev, sgdma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
}
}
ttm_pool_unpopulate(ttm);
}
-static int
-via_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int via_invalidate_caches(struct ttm_bo_device *bdev,
+ uint32_t flags)
{
/*
* FIXME: Invalidate texture caches here.
@@ -213,9 +214,9 @@ via_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
return 0;
}
-static int
-via_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
+static int via_init_mem_type(struct ttm_bo_device *bdev,
+ uint32_t type,
+ struct ttm_mem_type_manager *man)
{
#if IS_ENABLED(CONFIG_AGP)
struct via_device *dev_priv = container_of(bdev,
@@ -234,9 +235,10 @@ via_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_TT:
man->func = &ttm_bo_manager_func;
- /* By default we handle PCI/PCIe DMA. If AGP is avaliable
- * then we use that instead */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+ /* By default we handle PCI/PCIe DMA.
+ * If AGP is avaliable then we use that instead */
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
@@ -244,7 +246,8 @@ via_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP) &&
dev->agp != NULL) {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
} else
DRM_ERROR("AGP is possible but not enabled\n");
@@ -254,53 +257,65 @@ via_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- /* The display base address does not always equal the start of
- * the memory region of the VRAM. In our case it is */
+ /* The display base address does not always equal
+ * the start of the memory region of the VRAM.
+ * In our case it is */
man->gpu_offset = 0;
break;
case TTM_PL_PRIV0:
/* MMIO region */
man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ DRM_ERROR("Unsupported memory type %u\n",
+ (unsigned)type);
return -EINVAL;
}
+
return 0;
}
-static void
-via_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement)
+static void via_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
{
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- ttm_placement_from_domain(bo, placement, TTM_PL_FLAG_TT | TTM_PL_FLAG_SYSTEM, bo->bdev);
+ ttm_placement_from_domain(bo, placement,
+ TTM_PL_FLAG_TT | TTM_PL_FLAG_SYSTEM,
+ bo->bdev);
break;
case TTM_PL_TT:
default:
- ttm_placement_from_domain(bo, placement, TTM_PL_FLAG_SYSTEM, bo->bdev);
+ ttm_placement_from_domain(bo,
+ placement,
+ TTM_PL_FLAG_SYSTEM,
+ bo->bdev);
break;
}
}
/*
- * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps
- * track of the pages we allocate. We don't want to use kmalloc for the descriptor
- * chain because it may be quite large for some blits, and pages don't need to be
- * contingous.
+ * Allocate DMA capable memory for the blit descriptor chain,
+ * and an array that keeps track of the pages we allocate. We don't
+ * want to use kmalloc for the descriptor chain because it may be
+ * quite large for some blits, and pages don't need to be contingous.
*/
-struct drm_via_sg_info *
-via_alloc_desc_pages(struct ttm_tt *ttm, struct drm_device *dev,
- unsigned long dev_start, enum dma_data_direction direction)
+struct drm_via_sg_info* via_alloc_desc_pages(struct ttm_tt *ttm,
+ struct drm_device *dev,
+ unsigned long dev_start,
+ enum dma_data_direction direction)
{
struct drm_via_sg_info *vsg = kzalloc(sizeof(*vsg), GFP_KERNEL);
struct via_device *dev_priv = dev->dev_private;
@@ -311,10 +326,12 @@ via_alloc_desc_pages(struct ttm_tt *ttm, struct drm_device *dev,
vsg->direction = direction;
vsg->num_desc = ttm->num_pages;
vsg->descriptors_per_page = PAGE_SIZE / desc_size;
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->num_desc_pages = (vsg->num_desc +
+ vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
- vsg->desc_pages = kzalloc(vsg->num_desc_pages * sizeof(void *), GFP_KERNEL);
+ vsg->desc_pages = kzalloc(vsg->num_desc_pages *
+ sizeof(void *), GFP_KERNEL);
if (!vsg->desc_pages)
return ERR_PTR(-ENOMEM);
@@ -322,18 +339,22 @@ via_alloc_desc_pages(struct ttm_tt *ttm, struct drm_device *dev,
/* Alloc pages for descriptor chain */
for (i = 0; i < vsg->num_desc_pages; ++i) {
- vsg->desc_pages[i] = (unsigned long *) __get_free_page(GFP_KERNEL);
+ vsg->desc_pages[i] =
+ (unsigned long *) __get_free_page(GFP_KERNEL);
if (!vsg->desc_pages[i])
return ERR_PTR(-ENOMEM);
}
+
return vsg;
}
/* Move between GART and VRAM */
-static int
-via_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem, struct ttm_mem_reg *old_mem)
+static int via_move_blit(struct ttm_buffer_object *bo,
+ bool evict,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
{
struct via_device *dev_priv = container_of(bo->bdev,
struct via_device, ttm.bdev);
@@ -359,19 +380,25 @@ via_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu,
if (dev_addr & 0x0F)
return ret;
- vsg = via_alloc_desc_pages(bo->ttm, dev_priv->dev, dev_addr, direction);
+ vsg = via_alloc_desc_pages(bo->ttm, dev_priv->dev,
+ dev_addr, direction);
if (unlikely(IS_ERR(vsg)))
return PTR_ERR(vsg);
fence = via_fence_create_and_emit(dev_priv->dma_fences, vsg, 0);
if (unlikely(IS_ERR(fence)))
return PTR_ERR(fence);
- return ttm_bo_move_accel_cleanup(bo, (void *)fence, evict, no_wait_gpu, new_mem);
+ return ttm_bo_move_accel_cleanup(bo,
+ (void *)fence,
+ evict,
+ no_wait_gpu,
+ new_mem);
}
-static int
-via_move_from_vram(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+static int via_move_from_vram(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
@@ -414,9 +441,10 @@ out_cleanup:
return ret;
}
-static int
-via_move_to_vram(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+static int via_move_to_vram(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
@@ -450,44 +478,49 @@ out_cleanup:
return ret;
}
-static int
-via_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+static int via_bo_move(struct ttm_buffer_object *bo,
+ bool evict,
+ bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg *old_mem = &bo->mem;
int ret = 0;
DRM_DEBUG_KMS("Entered %s.\n", __func__);
- if ((old_mem->mem_type == TTM_PL_SYSTEM) && (!bo->ttm)) {
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
- goto exit;
- }
+ if ((old_mem->mem_type == TTM_PL_SYSTEM) && (!bo->ttm)) {
+ BUG_ON(old_mem->mm_node != NULL);
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ goto exit;
+ }
/* No real memory copy. Just use the new_mem
* directly. */
- if (((old_mem->mem_type == TTM_PL_SYSTEM)
- && (new_mem->mem_type == TTM_PL_TT))
- || ((old_mem->mem_type == TTM_PL_TT)
- && (new_mem->mem_type == TTM_PL_SYSTEM))
- || (new_mem->mem_type == TTM_PL_PRIV0)) {
+ if (((old_mem->mem_type == TTM_PL_SYSTEM) &&
+ (new_mem->mem_type == TTM_PL_TT)) ||
+ ((old_mem->mem_type == TTM_PL_TT) &&
+ (new_mem->mem_type == TTM_PL_SYSTEM)) ||
+ (new_mem->mem_type == TTM_PL_PRIV0)) {
BUG_ON(old_mem->mm_node != NULL);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- goto exit;
+ goto exit;
}
/* Accelerated copy involving the VRAM. */
- if ((old_mem->mem_type == TTM_PL_VRAM)
- && (new_mem->mem_type == TTM_PL_SYSTEM)) {
- ret = via_move_from_vram(bo, interruptible, no_wait_gpu, new_mem);
- } else if ((old_mem->mem_type == TTM_PL_SYSTEM)
- && (new_mem->mem_type == TTM_PL_VRAM)) {
- ret = via_move_to_vram(bo, interruptible, no_wait_gpu, new_mem);
+ if ((old_mem->mem_type == TTM_PL_VRAM) &&
+ (new_mem->mem_type == TTM_PL_SYSTEM)) {
+ ret = via_move_from_vram(bo, interruptible,
+ no_wait_gpu, new_mem);
+ } else if ((old_mem->mem_type == TTM_PL_SYSTEM) &&
+ (new_mem->mem_type == TTM_PL_VRAM)) {
+ ret = via_move_to_vram(bo, interruptible,
+ no_wait_gpu, new_mem);
} else {
- ret = via_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+ ret = via_move_blit(bo, evict, no_wait_gpu,
+ new_mem, old_mem);
}
if (ret) {
@@ -499,12 +532,11 @@ exit:
return ret;
}
-static int
-via_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int via_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct via_device *dev_priv = container_of(bdev,
struct via_device, ttm.bdev);
-
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_device *dev = dev_priv->dev;
@@ -526,7 +558,8 @@ via_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
- mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+ mem->bus.is_iomem =
+ !dev->agp->cant_use_aperture;
mem->bus.base = dev->agp->base;
}
#endif
@@ -546,14 +579,17 @@ via_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
default:
return -EINVAL;
}
+
return 0;
}
-static void via_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static void via_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
}
-static int via_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+static int via_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
{
return 0;
}
@@ -573,92 +609,101 @@ static struct ttm_bo_driver via_bo_driver = {
int via_mm_init(struct via_device *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
- struct ttm_buffer_object *bo;
- unsigned long long start;
- int len;
- int ret;
+ struct drm_device *dev = dev_priv->dev;
+ struct ttm_buffer_object *bo;
+ unsigned long long start;
+ int len;
+ int ret;
DRM_DEBUG_KMS("Entered %s.\n", __func__);
- ret = via_ttm_global_init(dev_priv);
+ ret = via_ttm_global_init(dev_priv);
if (ret) {
- DRM_ERROR("Failed to initialise TTM: %d\n", ret);
- goto exit;
+ DRM_ERROR("Failed to initialise TTM: %d\n", ret);
+ goto exit;
}
dev_priv->ttm.bdev.dev_mapping = dev->anon_inode->i_mapping;
- ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
- dev_priv->ttm.bo_global_ref.ref.object,
- &via_bo_driver,
- dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
- false);
- if (ret) {
- DRM_ERROR("Error initialising bo driver: %d\n", ret);
- goto exit;
- }
-
- ret = ttm_bo_init_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM, dev_priv->vram_size >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("Failed to map video RAM: %d\n", ret);
- goto exit;
- }
-
- /* Add an MTRR for the video RAM. */
- dev_priv->vram_mtrr = arch_phys_wc_add(dev_priv->vram_start, dev_priv->vram_size);
-
- DRM_INFO("Mapped %llu MB of video RAM at physical address 0x%08llx.\n",
- (unsigned long long) dev_priv->vram_size >> 20, dev_priv->vram_start);
-
- start = (unsigned long long) pci_resource_start(dev->pdev, 1);
- len = pci_resource_len(dev->pdev, 1);
- ret = ttm_bo_init_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0, len >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("Failed to map MMIO: %d\n", ret);
- goto exit;
- }
-
- ret = via_bo_create(&dev_priv->ttm.bdev, &bo, VIA_MMIO_REGSIZE, ttm_bo_type_kernel,
- TTM_PL_FLAG_PRIV0, 1, PAGE_SIZE, false, NULL, NULL);
- if (ret) {
- DRM_ERROR("Failed to create a buffer object for MMIO: %d\n", ret);
- goto exit;
- }
-
- ret = via_bo_pin(bo, &dev_priv->mmio);
- if (ret) {
- DRM_ERROR("Failed to map a buffer object for MMIO: %d\n", ret);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
- goto exit;
- }
-
- DRM_INFO("Mapped MMIO at physical address 0x%08llx.\n",
- start);
+ ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
+ dev_priv->ttm.bo_global_ref.ref.object,
+ &via_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ false);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ttm_bo_init_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM,
+ dev_priv->vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed to map video RAM: %d\n", ret);
+ goto exit;
+ }
+
+ /* Add an MTRR for the video RAM. */
+ dev_priv->vram_mtrr = arch_phys_wc_add(dev_priv->vram_start,
+ dev_priv->vram_size);
+
+ DRM_INFO("Mapped %llu MB of video RAM at physical "
+ "address 0x%08llx.\n",
+ (unsigned long long) dev_priv->vram_size >> 20,
+ dev_priv->vram_start);
+
+ start = (unsigned long long) pci_resource_start(dev->pdev, 1);
+ len = pci_resource_len(dev->pdev, 1);
+ ret = ttm_bo_init_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0,
+ len >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed to map MMIO: %d\n", ret);
+ goto exit;
+ }
+
+ ret = via_bo_create(&dev_priv->ttm.bdev, &bo,
+ VIA_MMIO_REGSIZE, ttm_bo_type_kernel,
+ TTM_PL_FLAG_PRIV0, 1, PAGE_SIZE,
+ false, NULL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to create a buffer object "
+ "for MMIO: %d\n", ret);
+ goto exit;
+ }
+
+ ret = via_bo_pin(bo, &dev_priv->mmio);
+ if (ret) {
+ DRM_ERROR("Failed to map a buffer object "
+ "for MMIO: %d\n", ret);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
+ goto exit;
+ }
+
+ DRM_INFO("Mapped MMIO at physical address 0x%08llx.\n",
+ start);
exit:
DRM_DEBUG_KMS("Exiting %s.\n", __func__);
- return ret;
+ return ret;
}
void via_mm_fini(struct drm_device *dev)
{
- struct via_device *dev_priv = dev->dev_private;
+ struct via_device *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("Entered %s.\n", __func__);
- ttm_bo_device_release(&dev_priv->ttm.bdev);
+ ttm_bo_device_release(&dev_priv->ttm.bdev);
- via_ttm_global_release(&dev_priv->ttm.mem_global_ref,
- &dev_priv->ttm.bo_global_ref,
- &dev_priv->ttm.bdev);
+ via_ttm_global_release(&dev_priv->ttm.mem_global_ref,
+ &dev_priv->ttm.bo_global_ref,
+ &dev_priv->ttm.bdev);
- /* mtrr delete the vram */
- if (dev_priv->vram_mtrr >= 0) {
- arch_phys_wc_del(dev_priv->vram_mtrr);
- }
+ /* mtrr delete the vram */
+ if (dev_priv->vram_mtrr >= 0) {
+ arch_phys_wc_del(dev_priv->vram_mtrr);
+ }
- dev_priv->vram_mtrr = 0;
+ dev_priv->vram_mtrr = 0;
DRM_DEBUG_KMS("Exiting %s.\n", __func__);
}
@@ -666,31 +711,36 @@ void via_mm_fini(struct drm_device *dev)
/*
* the buffer object domain
*/
-void
-ttm_placement_from_domain(struct ttm_buffer_object *bo, struct ttm_placement *placement, u32 domains,
- struct ttm_bo_device *bdev)
+void ttm_placement_from_domain(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ u32 domains,
+ struct ttm_bo_device *bdev)
{
- struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
- int cnt = 0, i = 0;
-
- if (!(domains & TTM_PL_MASK_MEM))
- domains = TTM_PL_FLAG_SYSTEM;
-
- do {
- int domain = (domains & (1 << i));
-
- if (domain) {
- heap->busy_placements[cnt].flags = (domain | bdev->man[i].default_caching);
- heap->busy_placements[cnt].fpfn = heap->busy_placements[cnt].lpfn = 0;
- heap->placements[cnt].flags = (domain | bdev->man[i].available_caching);
- heap->placements[cnt].fpfn = heap->placements[cnt].lpfn = 0;
- cnt++;
- }
- } while (i++ < TTM_NUM_MEM_TYPES);
-
- placement->num_busy_placement = placement->num_placement = cnt;
- placement->busy_placement = heap->busy_placements;
- placement->placement = heap->placements;
+ struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
+ int cnt = 0, i = 0;
+
+ if (!(domains & TTM_PL_MASK_MEM))
+ domains = TTM_PL_FLAG_SYSTEM;
+
+ do {
+ int domain = (domains & (1 << i));
+
+ if (domain) {
+ heap->busy_placements[cnt].flags =
+ (domain | bdev->man[i].default_caching);
+ heap->busy_placements[cnt].fpfn =
+ heap->busy_placements[cnt].lpfn = 0;
+ heap->placements[cnt].flags =
+ (domain | bdev->man[i].available_caching);
+ heap->placements[cnt].fpfn =
+ heap->placements[cnt].lpfn = 0;
+ cnt++;
+ }
+ } while (i++ < TTM_NUM_MEM_TYPES);
+
+ placement->num_busy_placement = placement->num_placement = cnt;
+ placement->busy_placement = heap->busy_placements;
+ placement->placement = heap->placements;
}
int via_bo_create(struct ttm_bo_device *bdev,
@@ -704,108 +754,115 @@ int via_bo_create(struct ttm_bo_device *bdev,
struct sg_table *sg,
struct reservation_object *resv)
{
- struct ttm_buffer_object *bo = NULL;
- struct ttm_placement placement;
- struct ttm_heap *heap;
- size_t acc_size;
- int ret = -ENOMEM;
+ struct ttm_buffer_object *bo = NULL;
+ struct ttm_placement placement;
+ struct ttm_heap *heap;
+ size_t acc_size;
+ int ret = -ENOMEM;
DRM_DEBUG_KMS("Entered %s.\n", __func__);
- size = round_up(size, byte_alignment);
- size = ALIGN(size, page_alignment);
+ size = round_up(size, byte_alignment);
+ size = ALIGN(size, page_alignment);
- heap = kzalloc(sizeof(struct ttm_heap), GFP_KERNEL);
- if (unlikely(!heap)) {
- DRM_ERROR("Failed to allocate kernel memory.");
- goto exit;
- }
+ heap = kzalloc(sizeof(struct ttm_heap), GFP_KERNEL);
+ if (unlikely(!heap)) {
+ DRM_ERROR("Failed to allocate kernel memory.");
+ goto exit;
+ }
- bo = &heap->bo;
+ bo = &heap->bo;
- ttm_placement_from_domain(bo, &placement, domains, bdev);
+ ttm_placement_from_domain(bo, &placement, domains, bdev);
- acc_size = ttm_bo_dma_acc_size(bdev, size,
- sizeof(struct ttm_heap));
+ acc_size = ttm_bo_dma_acc_size(bdev, size,
+ sizeof(struct ttm_heap));
- ret = ttm_bo_init(bdev, bo, size, type, &placement,
- page_alignment >> PAGE_SHIFT,
- interruptible, NULL, acc_size,
- sg, NULL, via_ttm_bo_destroy);
+ ret = ttm_bo_init(bdev, bo, size, type, &placement,
+ page_alignment >> PAGE_SHIFT,
+ interruptible, NULL, acc_size,
+ sg, NULL, via_ttm_bo_destroy);
- if (unlikely(ret)) {
- DRM_ERROR("Failed to initialize a TTM Buffer Object.");
- goto error;
- }
+ if (unlikely(ret)) {
+ DRM_ERROR("Failed to initialize a TTM Buffer Object.");
+ goto error;
+ }
- *p_bo = bo;
- goto exit;
+ *p_bo = bo;
+ goto exit;
error:
- kfree(heap);
+ kfree(heap);
exit:
DRM_DEBUG_KMS("Exiting %s.\n", __func__);
- return ret;
+ return ret;
}
-int
-via_bo_pin(struct ttm_buffer_object *bo, struct ttm_bo_kmap_obj *kmap)
+int via_bo_pin(struct ttm_buffer_object *bo,
+ struct ttm_bo_kmap_obj *kmap)
{
- struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
- struct ttm_placement placement;
- int ret;
-
- ret = ttm_bo_reserve(bo, true, false, false, 0);
- if (!ret) {
- placement.placement = heap->placements;
- placement.num_placement = 1;
-
- heap->placements[0].flags = (bo->mem.placement | TTM_PL_FLAG_NO_EVICT);
- ret = ttm_bo_validate(bo, &placement, false, false);
- if (!ret && kmap)
- ret = ttm_bo_kmap(bo, 0, bo->num_pages, kmap);
- ttm_bo_unreserve(bo);
- }
- return ret;
+ struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
+ struct ttm_placement placement;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (!ret) {
+ placement.placement = heap->placements;
+ placement.num_placement = 1;
+
+ heap->placements[0].flags = (bo->mem.placement |
+ TTM_PL_FLAG_NO_EVICT);
+ ret = ttm_bo_validate(bo, &placement, false, false);
+ if (!ret && kmap)
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, kmap);
+ ttm_bo_unreserve(bo);
+ }
+
+ return ret;
}
-int
-via_bo_unpin(struct ttm_buffer_object *bo, struct ttm_bo_kmap_obj *kmap)
+int via_bo_unpin(struct ttm_buffer_object *bo,
+ struct ttm_bo_kmap_obj *kmap)
{
- struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
- struct ttm_placement placement;
- int ret;
-
- ret = ttm_bo_reserve(bo, true, false, false, 0);
- if (!ret) {
- if (kmap)
- ttm_bo_kunmap(kmap);
-
- placement.placement = heap->placements;
- placement.num_placement = 1;
-
- heap->placements[0].flags = (bo->mem.placement & ~TTM_PL_FLAG_NO_EVICT);
- ret = ttm_bo_validate(bo, &placement, false, false);
- ttm_bo_unreserve(bo);
- }
- return ret;
+ struct ttm_heap *heap = container_of(bo, struct ttm_heap, bo);
+ struct ttm_placement placement;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (!ret) {
+ if (kmap)
+ ttm_bo_kunmap(kmap);
+
+ placement.placement = heap->placements;
+ placement.num_placement = 1;
+
+ heap->placements[0].flags = (bo->mem.placement &
+ ~TTM_PL_FLAG_NO_EVICT);
+ ret = ttm_bo_validate(bo, &placement, false, false);
+ ttm_bo_unreserve(bo);
+ }
+
+ return ret;
}
-int
-via_ttm_allocate_kernel_buffer(struct ttm_bo_device *bdev, unsigned long size,
- uint32_t alignment, uint32_t domain,
- struct ttm_bo_kmap_obj *kmap)
+int via_ttm_allocate_kernel_buffer(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t alignment,
+ uint32_t domain,
+ struct ttm_bo_kmap_obj *kmap)
{
- int ret = via_bo_create(bdev, &kmap->bo, size,
- ttm_bo_type_kernel, domain,
- alignment, PAGE_SIZE,
- false, NULL, NULL);
- if (likely(!ret)) {
- ret = via_bo_pin(kmap->bo, kmap);
- if (unlikely(ret)) {
- DRM_ERROR("failed to mmap the buffer\n");
- ttm_bo_unref(&kmap->bo);
- kmap->bo = NULL;
- }
- }
- return ret;
+ int ret = via_bo_create(bdev, &kmap->bo, size,
+ ttm_bo_type_kernel, domain,
+ alignment, PAGE_SIZE,
+ false, NULL, NULL);
+
+ if (likely(!ret)) {
+ ret = via_bo_pin(kmap->bo, kmap);
+ if (unlikely(ret)) {
+ DRM_ERROR("failed to mmap the buffer\n");
+ ttm_bo_unref(&kmap->bo);
+ kmap->bo = NULL;
+ }
+ }
+
+ return ret;
}
More information about the Openchrome-devel
mailing list