[PATCH 12/12] nouveau/ttm/dma: Enable the TTM DMA pool if device can only do 32-bit DMA.

j.glisse at gmail.com
Mon Nov 7 15:40:32 PST 2011


From: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>

If the card is capable of more than 32-bit DMA, use the default
TTM page pool code, which allocates pages from anywhere in memory.
Otherwise, if SWIOTLB is active, switch to the TTM DMA pool so that
allocations respect the device's 32-bit DMA mask.

Note: If the 'ttm.no_dma' parameter is set, the override is ignored
and the default TTM pool is used.

CC: Ben Skeggs <bskeggs at redhat.com>
CC: Francisco Jerez <currojerez at riseup.net>
CC: Dave Airlie <airlied at redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
Reviewed-by: Jerome Glisse <jglisse at redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c      |   73 ++++++++++++++++++++++++++++-
 drivers/gpu/drm/nouveau/nouveau_debugfs.c |    1 +
 drivers/gpu/drm/nouveau/nouveau_sgdma.c   |   60 +-----------------------
 3 files changed, 73 insertions(+), 61 deletions(-)
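
For reference, this is the shape of the new populate path: pick the
TTM DMA pool when the device is limited to 32-bit DMA and SWIOTLB is
active, otherwise fall back to the default page pool and DMA-map each
page by hand, unwinding any partial mappings on failure. A sketch
against the 3.2-era TTM/PCI APIs (names match the hunks below; not
meant to apply as-is), with the unwind written as i-- so that page 0
is also unmapped:

static int sketch_populate(struct ttm_tt *ttm, struct device *dev,
			   struct pci_dev *pdev)
{
	unsigned i;
	int r;

#ifdef CONFIG_SWIOTLB
	/* 32-bit-only device with SWIOTLB active: use the DMA pool. */
	if ((dma_get_mask(dev) <= DMA_BIT_MASK(32)) && swiotlb_nr_tbl())
		return ttm_dma_populate(ttm, dev);
#endif

	/* Default pool: allocate the pages... */
	r = ttm_page_alloc_ttm_tt_populate(ttm);
	if (r)
		return r;

	/* ...then DMA-map them one by one. */
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->dma_address[i] = pci_map_page(pdev, ttm->pages[i], 0,
						   PAGE_SIZE,
						   PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(pdev, ttm->dma_address[i])) {
			/* Post-decrement so index 0 is unmapped too. */
			while (i--) {
				pci_unmap_page(pdev, ttm->dma_address[i],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
				ttm->dma_address[i] = 0;
			}
			ttm_page_alloc_ttm_tt_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}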

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7e5ca3f..36234a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1049,10 +1049,79 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 	nouveau_fence_unref(&old_fence);
 }
 
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct drm_device *dev;
+	unsigned i;
+	int r;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	dev_priv = nouveau_bdev(ttm->bdev);
+	dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+	if ((dma_get_mask(dev->dev) <= DMA_BIT_MASK(32)) && swiotlb_nr_tbl()) {
+		return ttm_dma_populate(ttm, dev->dev);
+	}
+#endif
+
+	r = ttm_page_alloc_ttm_tt_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+						   0, PAGE_SIZE,
+						   PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+			while (i--) {
+				pci_unmap_page(dev->pdev, ttm->dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				ttm->dma_address[i] = 0;
+			}
+			ttm_page_alloc_ttm_tt_unpopulate(ttm);
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct drm_device *dev;
+	unsigned i;
+
+	dev_priv = nouveau_bdev(ttm->bdev);
+	dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+	if ((dma_get_mask(dev->dev) <= DMA_BIT_MASK(32)) && swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(ttm, dev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (ttm->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm->dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+	}
+
+	ttm_page_alloc_ttm_tt_unpopulate(ttm);
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
-	.ttm_tt_populate = &ttm_page_alloc_ttm_tt_populate,
-	.ttm_tt_unpopulate = &ttm_page_alloc_ttm_tt_unpopulate,
+	.ttm_tt_populate = &nouveau_ttm_tt_populate,
+	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8e15923..f52c2db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
 	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+	{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index bc2ab90..ee1eb7c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -13,41 +13,6 @@ struct nouveau_sgdma_be {
 	u64 offset;
 };
 
-static int
-nouveau_sgdma_dma_map(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
-						   0, PAGE_SIZE,
-						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(dev->pdev, ttm->dma_address[i],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-		ttm->dma_address[i] = 0;
-	}
-}
-
 static void
 nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
@@ -67,13 +32,8 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
-	int r;
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
@@ -110,7 +70,6 @@ nv04_sgdma_unbind(struct ttm_tt *ttm)
 			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -141,13 +100,8 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2;
 	u32 cnt = ttm->num_pages;
-	int r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -173,7 +127,6 @@ nv41_sgdma_unbind(struct ttm_tt *ttm)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -256,13 +209,9 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2, tmp[4];
 	u32 cnt = ttm->num_pages;
-	int i, r;
+	int i;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	if (pte & 0x0000000c) {
 		u32  max = 4 - ((pte >> 2) & 0x3);
@@ -321,7 +270,6 @@ nv44_sgdma_unbind(struct ttm_tt *ttm)
 		nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
 	nv44_sgdma_flush(ttm);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -335,13 +283,8 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_mem *node = mem->mm_node;
-	int r;
 
 	/* noop: bound in move_notify() */
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 	node->pages = ttm->dma_address;
 	return 0;
 }
@@ -350,7 +293,6 @@ static int
 nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
 	/* noop: unbound in move_notify() */
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
-- 
1.7.7.1


