[Nouveau] [PATCH 3/6] mmu: map small pages into big page(s) by IOMMU if possible

Vince Hsu <vinceh@nvidia.com>
Thu Apr 16 04:06:16 PDT 2015


This patch implements a way to aggregate small pages and map them as big
page(s) through the platform IOMMU, where one is available. This will allow
compression support to be enabled for these big pages later.

Signed-off-by: Vince Hsu <vinceh@nvidia.com>
---
 drm/nouveau/include/nvkm/subdev/mmu.h |  16 ++++
 drm/nouveau/nvkm/subdev/mmu/base.c    | 158 ++++++++++++++++++++++++++++++++--
 lib/include/nvif/os.h                 |  12 +++
 3 files changed, 179 insertions(+), 7 deletions(-)
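
For context, the idea is that a buffer backed by scattered 4 KiB system pages
can be remapped through the platform IOMMU so that it appears contiguous at
big-page granularity (128 KiB on GK20A), and the GPU page table then only
needs a single large-page PTE per chunk. The snippet below is only a rough
illustration of that aggregation step using the generic Linux IOMMU API; the
domain handle, the IOVA, and the helper name example_map_big_page are
placeholders for this note, not part of the patch, and the real work in this
series is done by the new mmu->map_iommu()/map_sg_iommu() hooks.

	/*
	 * Illustration only: remap 'npages' discontiguous small pages so that
	 * they look like one contiguous big page from the GPU's point of view.
	 * 'domain' and 'iova' are assumed to come from driver-specific code.
	 */
	#include <linux/iommu.h>
	#include <linux/sizes.h>

	#define BIG_PAGE_SIZE	SZ_128K		/* GK20A large page */
	#define SMALL_PAGE_SIZE	SZ_4K

	static int example_map_big_page(struct iommu_domain *domain,
					unsigned long iova, dma_addr_t *pages,
					unsigned int npages)
	{
		unsigned int i;
		int ret;

		if (npages != BIG_PAGE_SIZE / SMALL_PAGE_SIZE)
			return -EINVAL;

		/* Lay the small pages out back-to-back in IOMMU space. */
		for (i = 0; i < npages; i++) {
			ret = iommu_map(domain, iova + i * SMALL_PAGE_SIZE,
					pages[i], SMALL_PAGE_SIZE,
					IOMMU_READ | IOMMU_WRITE);
			if (ret)
				goto unwind;
		}

		/*
		 * A single large-page PTE in the GPU page table can now point
		 * at 'iova' instead of many small-page PTEs.
		 */
		return 0;

	unwind:
		while (i--)
			iommu_unmap(domain, iova + i * SMALL_PAGE_SIZE,
				    SMALL_PAGE_SIZE);
		return ret;
	}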

diff --git a/drm/nouveau/include/nvkm/subdev/mmu.h b/drm/nouveau/include/nvkm/subdev/mmu.h
index 3a5368776c31..3230d31a7971 100644
--- a/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -22,6 +22,8 @@ struct nvkm_vma {
 	struct nvkm_mm_node *node;
 	u64 offset;
 	u32 access;
+	struct list_head bp;
+	bool has_iommu_bp;
 };
 
 struct nvkm_vm {
@@ -37,6 +39,13 @@ struct nvkm_vm {
 	u32 lpde;
 };
 
+struct nvkm_vm_bp_list {
+	struct list_head head;
+	u32 pde;
+	u32 pte;
+	void *priv;
+};
+
 struct nvkm_mmu {
 	struct nvkm_subdev base;
 
@@ -45,6 +54,7 @@ struct nvkm_mmu {
 	u32 pgt_bits;
 	u8  spg_shift;
 	u8  lpg_shift;
+	bool iommu_capable;
 
 	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length,
 		       u64 mm_offset, struct nvkm_vm **);
@@ -56,7 +66,12 @@ struct nvkm_mmu {
 		    u64 phys, u64 delta);
 	void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
 		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
+	void (*map_iommu)(struct nvkm_vma *, struct nvkm_gpuobj *,
+		       struct nvkm_mem *, u32 pte, dma_addr_t *, void **);
+	void (*map_sg_iommu)(struct nvkm_vma *, struct nvkm_gpuobj *,
+		       struct nvkm_mem *, u32 pte, struct sg_page_iter *, void **);
 	void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
+	void (*unmap_iommu)(struct nvkm_vma *, void *);
 	void (*flush)(struct nvkm_vm *);
 };
 
@@ -84,6 +99,7 @@ extern struct nvkm_oclass nv41_mmu_oclass;
 extern struct nvkm_oclass nv44_mmu_oclass;
 extern struct nvkm_oclass nv50_mmu_oclass;
 extern struct nvkm_oclass gf100_mmu_oclass;
+extern struct nvkm_oclass gk20a_mmu_oclass;
 
 int  nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
 		    struct nvkm_vm **);
diff --git a/drm/nouveau/nvkm/subdev/mmu/base.c b/drm/nouveau/nvkm/subdev/mmu/base.c
index 277b6ec04e24..747c836d9fa6 100644
--- a/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -26,6 +26,43 @@
 
 #include <core/gpuobj.h>
 
+static int
+nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type);
+
+static int
+nvkm_vm_link_bp(struct nvkm_vma *vma, u32 pde, u32 pte,
+	struct nvkm_vm_pgt *vpgt, void *priv)
+{
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_vm_bp_list *list;
+	list = kzalloc(sizeof(*list), GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+
+	mutex_lock(&nv_subdev(mmu)->mutex);
+
+	if (!vma->has_iommu_bp) {
+		INIT_LIST_HEAD(&vma->bp);
+		vma->has_iommu_bp = true;
+	}
+	list->pde = pde;
+	list->pte = pte;
+	list->priv = priv;
+	list_add_tail(&list->head, &vma->bp);
+
+	mutex_unlock(&nv_subdev(mmu)->mutex);
+
+	return 0;
+}
+
+static void
+nvkm_vm_unlink_bp(struct nvkm_vma *vma, struct nvkm_vm_bp_list *list)
+{
+	list_del(&list->head);
+	kfree(list);
+}
+
 void
 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
@@ -129,6 +166,48 @@ finish:
 }
 
 static void
+nvkm_vm_map_sg_table_with_iommu(struct nvkm_vma *vma, u64 delta, u64 length,
+		     struct nvkm_mem *mem)
+{
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
+	int big = vma->node->type != mmu->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->pgt_bits - bits);
+	struct sg_page_iter iter;
+	u32 bpoff, i;
+	u32 multiple = 1 << bits;
+
+	i = 0;
+	for_each_sg_page(mem->sg->sgl, &iter, mem->sg->nents, 0) {
+		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		void *priv;
+
+		bpoff = offset + i;
+
+		pde = (bpoff >> mmu->pgt_bits) - vm->fpde;
+		pte = (bpoff & ((1 << mmu->pgt_bits) - 1)) >> bits;
+		pgt = vm->pgt[pde].obj[1];
+
+		mmu->map_sg_iommu(vma, pgt, mem, pte, &iter, &priv);
+
+		nvkm_vm_link_bp(vma, pde, pte, &vm->pgt[pde], priv);
+
+		i += multiple;
+		pte++;
+		if (unlikely(pte >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	mmu->flush(vm);
+}
+
+static void
 nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
 	       struct nvkm_mem *mem)
 {
@@ -166,15 +245,59 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
 	mmu->flush(vm);
 }
 
+static void
+nvkm_vm_map_sg_with_iommu(struct nvkm_vma *vma, u64 delta, u64 length,
+	       struct nvkm_mem *mem)
+{
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
+	dma_addr_t *list = mem->pages;
+	int big = vma->node->type != mmu->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 multiple = 1 << bits;
+
+	while (num) {
+		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		void *priv;
+
+		mmu->map_iommu(vma, pgt, mem, pte, list, &priv);
+
+		nvkm_vm_link_bp(vma, pde, pte, &vm->pgt[pde], priv);
+
+		list += multiple;
+		num--;
+		pte++;
+		if (unlikely(pte >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	mmu->flush(vm);
+}
+
 void
 nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
 {
-	if (node->sg)
-		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
-	else
-	if (node->pages)
-		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
-	else
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
+
+	if (node->sg) {
+		if (mmu->iommu_capable && vma->node->type == mmu->lpg_shift)
+			nvkm_vm_map_sg_table_with_iommu(vma, 0, node->size << 12, node);
+		else
+			nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
+	} else if (node->pages) {
+		if (mmu->iommu_capable && vma->node->type == mmu->lpg_shift)
+			nvkm_vm_map_sg_with_iommu(vma, 0, node->size << 12, node);
+		else
+			nvkm_vm_map_sg(vma, 0, node->size << 12, node);
+	} else
 		nvkm_vm_map_at(vma, 0, node);
 }
 
@@ -214,9 +337,30 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
 }
 
 void
+nvkm_vm_unmap_iommu(struct nvkm_vma *vma)
+{
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_vm_bp_list *list, *tmp;
+
+	list_for_each_entry_safe(list, tmp, &vma->bp, head) {
+		struct nvkm_gpuobj *pgt = vm->pgt[list->pde].obj[1];
+
+		mmu->unmap(pgt, list->pte, 1);
+		mmu->unmap_iommu(vma, list->priv);
+		nvkm_vm_unlink_bp(vma, list);
+	}
+
+	vma->has_iommu_bp = false;
+}
+
+void
 nvkm_vm_unmap(struct nvkm_vma *vma)
 {
-	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+	if (vma->has_iommu_bp)
+		nvkm_vm_unmap_iommu(vma);
+	else
+		nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
 }
 
 static void
diff --git a/lib/include/nvif/os.h b/lib/include/nvif/os.h
index 275fa84ad003..f56a5e5d3a4d 100644
--- a/lib/include/nvif/os.h
+++ b/lib/include/nvif/os.h
@@ -88,6 +88,7 @@ typedef dma_addr_t resource_size_t;
 #define likely(a) (a)
 #define unlikely(a) (a)
 #define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
 
 #define ERR_PTR(err) ((void *)(long)(err))
 #define PTR_ERR(ptr) ((long)(ptr))
@@ -914,6 +915,17 @@ struct sg_table {
 #define sg_dma_address(a) 0ULL
 #define sg_dma_len(a) 0ULL
 
+struct sg_page_iter {
+};
+
+#define sg_page_iter_dma_address(piter) 0ULL
+
+#define for_each_sg_page(sglist, piter, nents, pgoffset)		   \
+	for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
+	     __sg_page_iter_next(piter);)
+#define __sg_page_iter_start(piter, sglist, nents, pgoffset) (piter)
+#define __sg_page_iter_next(a) (false)
+
 /******************************************************************************
  * firmware
  *****************************************************************************/
-- 
2.1.4


