[PATCH 70/79] drm/i915: Remove kmap/kunmap wrappers

Chris Wilson <chris@chris-wilson.co.uk>
Sun Jan 1 18:51:30 UTC 2017


As kmap_page_dma() and kunmap_page_dma() are now plain
kmap_atomic()/kunmap_atomic() pairs, we can remove the wrappers for a
small gain in clarity (in particular, no longer hiding the atomic
critical sections!).
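
For illustration, a minimal before/after sketch of the conversion
(identifiers taken from the diff below; surrounding code elided):

	/* Before: the wrapper hides where the atomic section begins. */
	vaddr = kmap_px(pt);
	vaddr[pte] = scratch_pte;
	kunmap_px(vaddr);

	/* After: the map/unmap pair is spelt out, making the atomic
	 * critical section (no sleeping, no faulting) visible at the
	 * call site.
	 */
	vaddr = kmap_atomic_px(pt);
	vaddr[pte] = scratch_pte;
	kunmap_atomic(vaddr);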

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 77 +++++++++++++++----------------------
 1 file changed, 30 insertions(+), 47 deletions(-)
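
For context: the replacement macro simply expands the old wrapper
inline,

	#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

so every caller now pairs directly with kunmap_atomic(). Between the
two calls preemption (and page faults) are disabled, so the mapped
window must not be held across anything that can sleep.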

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a74a5ce8665c..41233601f50c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -380,21 +380,7 @@ static void cleanup_page_dma(struct i915_address_space *vm,
 	vm_free_page(vm, p->page);
 }
 
-static void *kmap_page_dma(struct i915_page_dma *p)
-{
-	return kmap_atomic(p->page);
-}
-
-/* We use the flushing unmap only with ppgtt structures:
- * page directories, page tables and scratch pages.
- */
-static void kunmap_page_dma(void *vaddr)
-{
-	kunmap_atomic(vaddr);
-}
-
-#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(vaddr) kunmap_page_dma((vaddr))
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
 
 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
@@ -405,13 +391,13 @@ static void fill_page_dma(struct i915_address_space *vm,
 			  struct i915_page_dma *p,
 			  const u64 val)
 {
-	u64 * const vaddr = kmap_page_dma(p);
+	u64 * const vaddr = kmap_atomic(p->page);
 	int i;
 
 	for (i = 0; i < 512; i++)
 		vaddr[i] = val;
 
-	kunmap_page_dma(vaddr);
+	kunmap_atomic(vaddr);
 }
 
 static void fill_page_dma_32(struct i915_address_space *vm,
@@ -644,9 +630,9 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
 	if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
 		return;
 
-	page_directorypo = kmap_px(pdp);
+	page_directorypo = kmap_atomic_px(pdp);
 	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
-	kunmap_px(page_directorypo);
+	kunmap_atomic(page_directorypo);
 }
 
 static void
@@ -655,11 +641,11 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
 				  struct i915_page_directory_pointer *pdp,
 				  int index)
 {
-	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
+	gen8_ppgtt_pml4e_t *pagemap = kmap_atomic_px(pml4);
 
 	WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
 	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
-	kunmap_px(pagemap);
+	kunmap_atomic(pagemap);
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
@@ -745,10 +731,10 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 	if (bitmap_empty(pt->used_ptes, GEN8_PTES))
 		return true;
 
-	vaddr = kmap_px(pt);
+	vaddr = kmap_atomic_px(pt);
 	while (pte < pte_end)
 		vaddr[pte++] = scratch_pte;
-	kunmap_px(vaddr);
+	kunmap_atomic(vaddr);
 
 	return false;
 }
@@ -773,9 +759,9 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 
 		if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
 			__clear_bit(pde, pd->used_pdes);
-			pde_vaddr = kmap_px(pd);
+			pde_vaddr = kmap_atomic_px(pd);
 			pde_vaddr[pde] = scratch_pde;
-			kunmap_px(pde_vaddr);
+			kunmap_atomic(pde_vaddr);
 			free_pt(vm, pt);
 		}
 	}
@@ -808,9 +794,9 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 		if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
 			__clear_bit(pdpe, pdp->used_pdpes);
 			if (USES_FULL_48BIT_PPGTT(vm->i915)) {
-				pdpe_vaddr = kmap_px(pdp);
+				pdpe_vaddr = kmap_atomic_px(pdp);
 				pdpe_vaddr[pdpe] = scratch_pdpe;
-				kunmap_px(pdpe_vaddr);
+				kunmap_atomic(pdpe_vaddr);
 			}
 			free_pd(vm, pd);
 		}
@@ -847,9 +833,9 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
 
 		if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
 			__clear_bit(pml4e, pml4->used_pml4es);
-			pml4e_vaddr = kmap_px(pml4);
+			pml4e_vaddr = kmap_atomic_px(pml4);
 			pml4e_vaddr[pml4e] = scratch_pml4e;
-			kunmap_px(pml4e_vaddr);
+			kunmap_atomic(pml4e_vaddr);
 			free_pdp(vm, pdp);
 		}
 	}
@@ -887,7 +873,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 	bool ret = true;
 
 	pd = pdp->page_directory[pdpe];
-	vaddr = kmap_px(pd->page_table[pde]);
+	vaddr = kmap_atomic_px(pd->page_table[pde]);
 	do {
 		vaddr[pte] = pte_encode | iter->dma;
 		iter->dma += PAGE_SIZE;
@@ -908,12 +894,12 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 				pde = 0;
 			}
 
-			kunmap_px(vaddr);
-			vaddr = kmap_px(pd->page_table[pde]);
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic_px(pd->page_table[pde]);
 			pte = 0;
 		}
 	} while (1);
-	kunmap_px(vaddr);
+	kunmap_atomic(vaddr);
 
 	return ret;
 }
@@ -1346,7 +1332,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-		gen8_pde_t *const page_directory = kmap_px(pd);
+		gen8_pde_t *const page_directory = kmap_atomic_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = length;
 		uint64_t pd_start = start;
@@ -1381,7 +1367,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 			 * point we're still relying on insert_entries() */
 		}
 
-		kunmap_px(page_directory);
+		kunmap_atomic(page_directory);
 		__set_bit(pdpe, pdp->used_pdpes);
 		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
 	}
@@ -1493,7 +1479,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 			if (!test_bit(pde, pd->used_pdes))
 				continue;
 
-			pt_vaddr = kmap_px(pt);
+			pt_vaddr = kmap_atomic_px(pt);
 			for (pte = 0; pte < GEN8_PTES; pte += 4) {
 				uint64_t va =
 					(pdpe << GEN8_PDPE_SHIFT) |
@@ -1517,9 +1503,6 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 				}
 				seq_puts(m, "\n");
 			}
-			/* don't use kunmap_px, it could trigger
-			 * an unnecessary flush.
-			 */
 			kunmap_atomic(pt_vaddr);
 		}
 	}
@@ -1674,7 +1657,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
 
 		for (pte = 0; pte < GEN6_PTES; pte+=4) {
 			unsigned long va =
@@ -1697,7 +1680,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 			}
 			seq_puts(m, "\n");
 		}
-		kunmap_px(pt_vaddr);
+		kunmap_atomic(pt_vaddr);
 	}
 }
 
@@ -1889,12 +1872,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		if (last_pte > GEN6_PTES)
 			last_pte = GEN6_PTES;
 
-		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
 
-		kunmap_px(pt_vaddr);
+		kunmap_atomic(pt_vaddr);
 
 		num_entries -= last_pte - first_pte;
 		first_pte = 0;
@@ -1915,7 +1898,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	struct sgt_dma iter;
 	gen6_pte_t *vaddr;
 
-	vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
 	iter.sg = pages->sgl;
 	iter.dma = sg_dma_address(iter.sg);
 	iter.max = iter.dma + iter.sg->length;
@@ -1933,12 +1916,12 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		}
 
 		if (++act_pte == GEN6_PTES) {
-			kunmap_px(vaddr);
-			vaddr = kmap_px(ppgtt->pd.page_table[++act_pt]);
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
 			act_pte = 0;
 		}
 	} while (1);
-	kunmap_px(vaddr);
+	kunmap_atomic(vaddr);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
-- 
2.11.0