[PATCH 2/3] iova32

Chris Wilson chris at chris-wilson.co.uk
Sat Jan 16 12:21:25 UTC 2021


---
 drivers/iommu/iova.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 3a808d67acca..84028a26f12b 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -187,20 +187,37 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	unsigned long flags;
 	unsigned long new_pfn, retry_pfn;
 	unsigned long align_mask = ~0UL;
-	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
+	unsigned long high_pfn, low_pfn;
 
 	if (size_aligned)
 		align_mask <<= fls_long(size - 1);
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (limit_pfn <= iovad->dma_32bit_pfn) {
+		if (size >= iovad->max32_alloc_size)
+			goto iova32_full;
+
+		curr = &iovad->anchor.node;
+		curr_iova = rb_entry(curr, struct iova, node);
+
+		do {
+			low_pfn = curr_iova->pfn_hi + 1;
+			new_pfn = (low_pfn + ~align_mask) & align_mask;
+			high_pfn = new_pfn + size;
+			prev = curr;
+			curr = rb_next(curr);
+			curr_iova = rb_entry(curr, struct iova, node);
+		} while (curr && high_pfn > curr_iova->pfn_lo && high_pfn <= limit_pfn);
+		if (high_pfn <= limit_pfn)
+			goto done;
+	}
 
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
 	retry_pfn = curr_iova->pfn_hi + 1;
+	high_pfn = limit_pfn;
+	low_pfn = iovad->start_pfn;
 
 retry:
 	do {
@@ -223,6 +242,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		goto iova32_full;
 	}
 
+done:
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
 	new->pfn_hi = new->pfn_lo + size - 1;
-- 
2.20.1



More information about the Intel-gfx-trybot mailing list