[Nouveau] [PATCH 2/2] drm/ttm: remove io_reserve_lru handling

Christian König <ckoenig.leichtzumerken at gmail.com>
Mon Sep 30 13:12:54 UTC 2019


The io_reserve LRU is not used any more. Remove it together with the
io_reserve_mutex locking around io_mem_reserve/io_mem_free and the
ttm_mem_io_reserve_vm()/ttm_mem_io_free_vm() helpers built on top of it.

Signed-off-by: Christian König <christian.koenig at amd.com>
---
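Note (illustration only, not part of the patch): with the LRU and its
io_reserve_mutex gone, ttm_mem_io_reserve() and ttm_mem_io_free() reduce
to plain pass-throughs to the driver's io_mem_reserve/io_mem_free
callbacks, and every caller drops the lock/unlock bracketing. A minimal
sketch of the caller pattern, condensed from the hunks below:

	/* before: reservation bracketed by the per-manager mutex */
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);

	/* after: direct call, no extra locking */
	ret = ttm_mem_io_reserve(bdev, mem);
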
 drivers/gpu/drm/ttm/ttm_bo.c      |  37 +++----------
 drivers/gpu/drm/ttm/ttm_bo_util.c | 114 +-------------------------------------
 drivers/gpu/drm/ttm/ttm_bo_vm.c   |  33 +++--------
 include/drm/ttm/ttm_bo_api.h      |   5 --
 include/drm/ttm/ttm_bo_driver.h   |  14 -----
 5 files changed, 20 insertions(+), 183 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6394e0c5cc02..324468d0870f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -328,13 +328,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
-		ret = ttm_mem_io_lock(old_man, true);
-		if (unlikely(ret != 0))
-			goto out_err;
-		ttm_bo_unmap_virtual_locked(bo);
-		ttm_mem_io_unlock(old_man);
-	}
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
+		ttm_bo_unmap_virtual(bo);
 
 	/*
 	 * Create and bind a ttm if required.
@@ -670,15 +665,12 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (bo->bdev->driver->release_notify)
 		bo->bdev->driver->release_notify(bo);
 
 	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
-	ttm_mem_io_lock(man, false);
-	ttm_mem_io_free_vm(bo);
-	ttm_mem_io_unlock(man);
+	ttm_mem_io_free(bdev, &bo->mem);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 }
@@ -727,8 +719,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved_vm = false;
-	evict_mem.bus.io_reserved_count = 0;
+	evict_mem.bus.base = 0;
 
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 	if (ret) {
@@ -1188,8 +1179,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved_vm = false;
-	mem.bus.io_reserved_count = 0;
+	mem.bus.base = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1336,8 +1326,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved_vm = false;
-	bo->mem.bus.io_reserved_count = 0;
+	bo->mem.bus.base = 0;
 	bo->moving = NULL;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
 	bo->acc_size = acc_size;
@@ -1788,22 +1777,12 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-
-	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-	ttm_mem_io_free_vm(bo);
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	ttm_mem_io_lock(man, false);
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(man);
+	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2eca752c39e9..7df89134b50c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -91,124 +91,30 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
-{
-	if (likely(man->io_reserve_fastpath))
-		return 0;
-
-	if (interruptible)
-		return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-	mutex_lock(&man->io_reserve_mutex);
-	return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
-{
-	if (likely(man->io_reserve_fastpath))
-		return;
-
-	mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
-	struct ttm_buffer_object *bo;
-
-	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
-		return -EAGAIN;
-
-	bo = list_first_entry(&man->io_reserve_lru,
-			      struct ttm_buffer_object,
-			      io_reserve_lru);
-	list_del_init(&bo->io_reserve_lru);
-	ttm_bo_unmap_virtual_locked(bo);
-
-	return 0;
-}
-
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 		       struct ttm_mem_reg *mem)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	int ret = 0;
-
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
-	if (likely(man->io_reserve_fastpath))
-		return bdev->driver->io_mem_reserve(bdev, mem);
-
-	if (bdev->driver->io_mem_reserve &&
-	    mem->bus.io_reserved_count++ == 0) {
-retry:
-		ret = bdev->driver->io_mem_reserve(bdev, mem);
-		if (ret == -EAGAIN) {
-			ret = ttm_mem_io_evict(man);
-			if (ret == 0)
-				goto retry;
-		}
-	}
-	return ret;
+
+	return bdev->driver->io_mem_reserve(bdev, mem);
 }
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
 		     struct ttm_mem_reg *mem)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	if (likely(man->io_reserve_fastpath))
-		return;
-
-	if (bdev->driver->io_mem_reserve &&
-	    --mem->bus.io_reserved_count == 0 &&
-	    bdev->driver->io_mem_free)
+	if (bdev->driver->io_mem_free)
 		bdev->driver->io_mem_free(bdev, mem);
-
-}
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	int ret;
-
-	if (!mem->bus.io_reserved_vm) {
-		struct ttm_mem_type_manager *man =
-			&bo->bdev->man[mem->mem_type];
-
-		ret = ttm_mem_io_reserve(bo->bdev, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		mem->bus.io_reserved_vm = true;
-		if (man->use_io_reserve_lru)
-			list_add_tail(&bo->io_reserve_lru,
-				      &man->io_reserve_lru);
-	}
-	return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-
-	if (mem->bus.io_reserved_vm) {
-		mem->bus.io_reserved_vm = false;
-		list_del_init(&bo->io_reserve_lru);
-		ttm_mem_io_free(bo->bdev, mem);
-	}
 }
 
 static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
-	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -220,9 +126,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
 		else
 			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 		if (!addr) {
-			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
-			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -239,9 +143,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *
 
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
-	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -616,8 +518,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_type_manager *man =
-		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -628,9 +528,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -645,10 +543,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_buffer_object *bo = map->bo;
-	struct ttm_mem_type_manager *man =
-		&bo->bdev->man[bo->mem.mem_type];
-
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
@@ -666,9 +560,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ad485d49e19c..1fec54c5fd60 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -193,8 +193,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	pgoff_t i;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
-	struct ttm_mem_type_manager *man =
-		&bdev->man[bo->mem.mem_type];
 
 	/*
 	 * Refuse to fault imported pages. This should be handled
@@ -233,24 +231,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	if (unlikely(ret != 0))
 		return ret;
 
-	err = ttm_mem_io_lock(man, true);
+	err = ttm_mem_io_reserve(bdev, &bo->mem);
 	if (unlikely(err != 0))
-		return VM_FAULT_NOPAGE;
-	err = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+		return VM_FAULT_SIGBUS;
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	if (unlikely(page_offset >= bo->num_pages))
+		return VM_FAULT_SIGBUS;
 
 	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
@@ -262,10 +253,8 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		};
 
 		ttm = bo->ttm;
-		if (ttm_tt_populate(bo->ttm, &ctx)) {
-			ret = VM_FAULT_OOM;
-			goto out_io_unlock;
-		}
+		if (ttm_tt_populate(bo->ttm, &ctx))
+			return VM_FAULT_OOM;
 	} else {
 		/* Iomem should not be marked encrypted */
 		cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
@@ -281,8 +270,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				ret = VM_FAULT_OOM;
-				goto out_io_unlock;
+				return VM_FAULT_OOM;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -300,7 +288,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
 			if (i == 0)
-				goto out_io_unlock;
+				return VM_FAULT_NOPAGE;
 			else
 				break;
 		}
@@ -309,10 +297,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-	ret = VM_FAULT_NOPAGE;
-out_io_unlock:
-	ttm_mem_io_unlock(man);
-	return ret;
+	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2e308b896d2f..c79ef02450f7 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -62,8 +62,6 @@ struct ttm_lru_bulk_move;
  * @is_iomem:		is this io memory ?
  * @size:		size in byte
  * @offset:		offset from the base address
- * @io_reserved_vm:     The VM system has a refcount in @io_reserved_count
- * @io_reserved_count:  Refcounting the numbers of callers to ttm_mem_io_reserve
  *
  * Structure indicating the bus placement of an object.
  */
@@ -73,11 +71,8 @@ struct ttm_bus_placement {
 	unsigned long	size;
 	unsigned long	offset;
 	bool		is_iomem;
-	bool		io_reserved_vm;
-	uint64_t        io_reserved_count;
 };
 
-
 /**
  * struct ttm_mem_reg
  *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6f536caea368..ecf0ceee205b 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -615,20 +615,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
  */
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- *
- * The caller must take ttm_mem_io_lock before calling this function.
- */
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-
 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 
-- 
2.14.1