[Intel-xe] [PATCH v4 1/2] drm/xe/xe_migrate.c: Use DPA offset for page table entries.

Niranjana Vishwanathapura niranjana.vishwanathapura at intel.com
Fri Oct 6 17:12:56 UTC 2023


On Thu, Oct 05, 2023 at 06:00:39PM -0400, David Kershner wrote:
>Device Physical Address (DPA) is the starting offset of device memory.
>
>Update xe_migrate identity map base PTE entries to start at dpa_base
>instead of 0.
>
>The VM offset value should be relative to 0 rather than to the DPA base.
>
>Signed-off-by: David Kershner <david.kershner at intel.com>

LGTM.
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
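
One note for readers following along: with this change the migrate VM
offsets really are 0-relative. A quick round trip with made-up numbers,
assuming xe_pt_shift(2) == 30 (i.e. the identity map is based at 256 GiB,
see xe_migrate_vram_ofs() below):

	dpa_base   = 0x200000000	/* 8 GiB, hypothetical */
	bo DPA     = 0x200001000
	migrate VA = (0x200001000 - 0x200000000) + (256ULL << 30)
	           = 0x4000001000

Since the identity-map PTEs now carry dpa_base-based payloads, the page
walk resolves that VA back to the original DPA.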

>---
> drivers/gpu/drm/xe/xe_migrate.c | 27 +++++++++++++++++----------
> 1 file changed, 17 insertions(+), 10 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
>index 15f091a7bba3..db0e8d87a286 100644
>--- a/drivers/gpu/drm/xe/xe_migrate.c
>+++ b/drivers/gpu/drm/xe/xe_migrate.c
>@@ -114,8 +114,13 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level)
> 	return (slot + 1ULL) << xe_pt_shift(level + 1);
> }
>
>-static u64 xe_migrate_vram_ofs(u64 addr)
>+static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
> {
>+	/*
>+	 * Subtract the DPA base to get the correct offset into the
>+	 * identity map for the migrate offset.
>+	 */
>+	addr -= xe->mem.vram.dpa_base;
> 	return addr + (256ULL << xe_pt_shift(2));
> }
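
After this change the helper effectively computes, with xe_pt_shift(2) == 30
(sketch, not literal patch code):

	return (addr - xe->mem.vram.dpa_base) + (256ULL << 30);

so callers keep passing real DPAs and get back a 0-relative offset into
the identity map of the migrate VM.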
>
>@@ -149,7 +154,7 @@ static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
>
> 	xe_map_memset(xe, &m->cleared_bo->vmap, 0, 0x00, cleared_size);
> 	vram_addr = xe_bo_addr(m->cleared_bo, 0, XE_PAGE_SIZE);
>-	m->cleared_vram_ofs = xe_migrate_vram_ofs(vram_addr);
>+	m->cleared_vram_ofs = xe_migrate_vram_ofs(xe, vram_addr);
>
> 	return 0;
> }
>@@ -225,12 +230,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
> 	} else {
> 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
>
>-		m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
>+		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
>
> 		if (xe->info.supports_usm) {
> 			batch = tile->primary_gt->usm.bb_pool->bo;
> 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
>-			m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
>+			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
> 		}
> 	}
>
>@@ -268,7 +273,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
> 		 * Use 1GB pages, it shouldn't matter the physical amount of
> 		 * vram is less, when we don't access it.
> 		 */
>-		for (pos = 0; pos < xe->mem.vram.actual_physical_size; pos += SZ_1G, ofs += 8)
>+		for (pos = xe->mem.vram.dpa_base;
>+		     pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
>+		     pos += SZ_1G, ofs += 8)
> 			xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
> 	}
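
And with pos starting at dpa_base, the 1 GiB identity PTE at index i ends
up as below (sketch of the resulting write, not literal patch code):

	/* migrate VA (256 + i) GiB  ->  DPA dpa_base + i GiB */
	xe_map_wr(xe, &bo->vmap, ofs + i * 8, u64,
		  (xe->mem.vram.dpa_base + (u64)i * SZ_1G) | flags);

which matches the dpa_base subtraction in xe_migrate_vram_ofs() above.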
>
>@@ -443,8 +450,8 @@ static u32 pte_update_size(struct xe_migrate *m,
> 		cmds += cmd_size;
> 	} else {
> 		/* Offset into identity map. */
>-		*L0_ofs = xe_migrate_vram_ofs(cur->start +
>-					      vram_region_gpu_offset(res));
>+		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
>+					      cur->start + vram_region_gpu_offset(res));
> 		cmds += cmd_size;
> 	}
>
>@@ -1060,10 +1067,10 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
> 	 * pages are used. Hence the assert.
> 	 */
> 	xe_tile_assert(tile, update->qwords <= 0x1ff);
>-	if (!ppgtt_ofs) {
>-		ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
>+	if (!ppgtt_ofs)
>+		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
>+						xe_bo_addr(update->pt_bo, 0,
> 							   XE_PAGE_SIZE));
>-	}
>
> 	do {
> 		u64 addr = ppgtt_ofs + ofs * 8;
>-- 
>2.35.1
>

