[Intel-xe] [PATCH 1/3] drm/xe: s/XE_PTE_READ_ONLY/XE_PTE_FLAG_READ_ONLY

Matthew Brost matthew.brost at intel.com
Thu Jun 8 05:03:31 UTC 2023


This define is for a driver-internal PTE flag rather than a field in the
hardware PTEs, so rename it as such. This will help avoid further
confusion in an upcoming patch.
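
For illustration only (not part of the patch): a minimal standalone sketch of
the distinction this rename makes explicit, based on the definitions in
xe_bo.h and the usage in __gen8_pte_encode() below. The BIT()/BIT_ULL()
macros and the pte_encode_sketch() helper here are stand-ins for the kernel
versions, not driver code.

    #include <stdint.h>

    #define BIT(n)		(1u << (n))
    #define BIT_ULL(n)	(1ull << (n))

    /* Bits written into the hardware page-table entry. */
    #define XE_PAGE_PRESENT		BIT_ULL(0)
    #define XE_PAGE_RW		BIT_ULL(1)

    /* Driver-internal flag carried in vma->pte_flags, never written to HW as-is. */
    #define XE_PTE_FLAG_READ_ONLY	BIT(0)

    /* Rough shape of the encode path: internal flags select hardware PTE bits. */
    static uint64_t pte_encode_sketch(uint64_t pte, uint32_t flags)
    {
    	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;

    	if (flags & XE_PTE_FLAG_READ_ONLY)
    		pte &= ~XE_PAGE_RW;	/* drop the writable bit in the HW PTE */

    	return pte;
    }

Both namespaces happen to use bit 0, which is exactly why the FLAG_ prefix
helps tell the internal flag word apart from the hardware PTE layout.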

Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
 drivers/gpu/drm/xe/xe_bo.h |  2 +-
 drivers/gpu/drm/xe/xe_pt.c |  2 +-
 drivers/gpu/drm/xe/xe_vm.c | 12 +++++++-----
 3 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 29eb7474f018..552fe073e9c5 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -65,7 +65,7 @@
 #define XE_PAGE_PRESENT			BIT_ULL(0)
 #define XE_PAGE_RW			BIT_ULL(1)
 
-#define XE_PTE_READ_ONLY		BIT(0)
+#define XE_PTE_FLAG_READ_ONLY		BIT(0)
 
 #define XE_PL_SYSTEM		TTM_PL_SYSTEM
 #define XE_PL_TT		TTM_PL_TT
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index bef265715000..39ec94549439 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -102,7 +102,7 @@ static u64 __gen8_pte_encode(u64 pte, enum xe_cache_level cache, u32 flags,
 {
 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
 
-	if (unlikely(flags & XE_PTE_READ_ONLY))
+	if (unlikely(flags & XE_PTE_FLAG_READ_ONLY))
 		pte &= ~XE_PAGE_RW;
 
 	/* FIXME: I don't think the PPAT handling is correct for MTL */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d1c380ad7f6b..94fc9c330235 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -61,7 +61,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	bool in_kthread = !current->mm;
 	unsigned long notifier_seq;
 	int pinned, ret, i;
-	bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
+	bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
 
 	lockdep_assert_held(&vm->lock);
 	XE_BUG_ON(!xe_vma_is_userptr(vma));
@@ -869,7 +869,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	vma->start = start;
 	vma->end = end;
 	if (read_only)
-		vma->pte_flags = XE_PTE_READ_ONLY;
+		vma->pte_flags = XE_PTE_FLAG_READ_ONLY;
 
 	if (tile_mask) {
 		vma->tile_mask = tile_mask;
@@ -923,7 +923,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 {
 	struct xe_vm *vm = vma->vm;
 	struct xe_device *xe = vm->xe;
-	bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
+	bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
 
 	if (xe_vma_is_userptr(vma)) {
 		if (vma->userptr.sg) {
@@ -2641,7 +2641,8 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
 					  first->userptr.ptr,
 					  first->start,
 					  lookup->start - 1,
-					  (first->pte_flags & XE_PTE_READ_ONLY),
+					  (first->pte_flags &
+					   XE_PTE_FLAG_READ_ONLY),
 					  first->tile_mask);
 		if (first->bo)
 			xe_bo_unlock(first->bo, &ww);
@@ -2672,7 +2673,8 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
 					 last->userptr.ptr + chunk,
 					 last->start + chunk,
 					 last->end,
-					 (last->pte_flags & XE_PTE_READ_ONLY),
+					 (last->pte_flags &
+					  XE_PTE_FLAG_READ_ONLY),
 					 last->tile_mask);
 		if (last->bo)
 			xe_bo_unlock(last->bo, &ww);
-- 
2.34.1


