[PATCH 4/4] iova-dbg2

Chris Wilson <chris@chris-wilson.co.uk>
Fri Jan 15 10:55:28 UTC 2021


---
 drivers/iommu/iova.c | 30 +++++++++++++++++++++++++++---
 1 file changed, 27 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 10e411b0edaf..bfcb04874cd1 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -195,8 +195,11 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	if (limit_pfn <= iovad->dma_32bit_pfn &&
-			size >= iovad->max32_alloc_size)
+			size >= iovad->max32_alloc_size) {
+		pr_err("%s size:%lu, limit_pfn:%lu, 32bit_pfn:%lu, 32bit_max:%lu\n",
+		       __func__, size, limit_pfn, iovad->dma_32bit_pfn, iovad->max32_alloc_size);
 		goto iova32_full;
+	}
 
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
@@ -212,6 +215,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
 
 	if (high_pfn < size || new_pfn < low_pfn) {
+		pr_err("%s retry? first?%d, retry_pfn:%lu, limit_pfn:%lu\n",
+		       __func__, low_pfn == iovad->start_pfn,
+		       retry_pfn, limit_pfn);
 		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
 			high_pfn = limit_pfn;
 			low_pfn = retry_pfn;
@@ -219,7 +225,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 			curr_iova = rb_entry(curr, struct iova, node);
 			goto retry;
 		}
-		iovad->max32_alloc_size = size;
+		iovad->max32_alloc_size =
+		       min(iovad->max32_alloc_size, size);
 		goto iova32_full;
 	}
 
@@ -235,7 +242,22 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;
 
 iova32_full:
+	{
+		int count = 0;
+		high_pfn = limit_pfn;
+		curr = &iovad->anchor.node;
+		curr_iova = rb_entry(curr, struct iova, node);
+		do {
+			high_pfn = min(high_pfn, curr_iova->pfn_lo);
+			pr_err("[%d]: high:%lu\n", count, high_pfn);
+			curr = rb_prev(curr);
+			curr_iova = rb_entry(curr, struct iova, node);
+		} while (curr && count++ < 16);
+	}
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	pr_err("%s size:%lu, 32bit_pfn:%lu, 32bit_max:%lu, low_pfn=%lu, high_pfn=%lu, retry_pfn=%lu, limit_pfn:%lu, start:%lu\n",
+	       __func__, size, iovad->dma_32bit_pfn, iovad->max32_alloc_size,
+	       low_pfn, high_pfn, retry_pfn, limit_pfn, iovad->start_pfn);
 	return -ENOMEM;
 }
 
@@ -309,8 +331,10 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	int ret;
 
 	new_iova = alloc_iova_mem();
-	if (!new_iova)
+	if (!new_iova) {
+		pr_err("alloc_iova_mem\n");
 		return NULL;
+	}
 
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
 			new_iova, size_aligned);
-- 
2.20.1



More information about the Intel-gfx-trybot mailing list