[Intel-xe] [PATCH v2 1/6] drm/xe: Avoid doing rebinds
Matthew Brost
matthew.brost at intel.com
Thu Jul 20 04:10:52 UTC 2023
If we don't change page sizes we can avoid doing rebinds and instead just
do a partial unbind. The algorithm to determine a VMA's page size is
greedy: we assume all pages in the removed VMA use the largest page size
found in the VMA.
v2: Don't exceed 100 lines
v3: struct xe_vma_op_unmap removed in a different patch, remove XXX comment
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
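For reviewers, a minimal user-space sketch of the greedy page-size
selection this patch relies on. The flag values and the standalone helper
are illustrative stand-ins, not the kernel definitions (which build on
DRM_GPUVA_USERBITS):

#include <stdint.h>

#define SZ_4K	0x1000ULL
#define SZ_2M	0x200000ULL
#define SZ_1G	0x40000000ULL

/* Illustrative stand-ins for the DRM_GPUVA_USERBITS-based VMA flags. */
#define XE_VMA_PTE_4K	(1u << 5)
#define XE_VMA_PTE_2M	(1u << 6)
#define XE_VMA_PTE_1G	(1u << 7)

/*
 * Greedy selection: xe_pt_stage_bind_entry() ORs in XE_VMA_PTE_4K << level
 * for every PTE it writes (level 0 -> 4K, 1 -> 2M, 2 -> 1G), so if any
 * entry in the VMA used a large page we treat the whole VMA as using it.
 */
static uint64_t max_pte_size(uint32_t vma_flags)
{
	if (vma_flags & XE_VMA_PTE_1G)
		return SZ_1G;
	else if (vma_flags & XE_VMA_PTE_2M)
		return SZ_2M;

	return SZ_4K;
}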
drivers/gpu/drm/xe/xe_pt.c | 1 +
drivers/gpu/drm/xe/xe_vm.c | 72 +++++++++++++++++++++++++++-----
drivers/gpu/drm/xe/xe_vm_types.h | 7 ++++
3 files changed, 70 insertions(+), 10 deletions(-)
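And a hedged sketch of the new remap bookkeeping: given the old VMA's
bounds and the hole punched by the new bind (assuming both the prev and
next remnants exist), decide whether prev/next can keep their existing
PTEs and shrink the unmap range to match. plan_remap() and its parameters
are hypothetical names for illustration; the arithmetic mirrors
vm_bind_ioctl_ops_parse() below:

#include <stdbool.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

struct remap_plan {
	uint64_t start, range;		/* region actually unmapped */
	bool skip_prev, skip_next;	/* pieces that keep their PTEs */
};

/*
 * The old VMA covers [old_start, old_end); a new bind punched the hole
 * [hole_start, hole_end), leaving prev = [old_start, hole_start) and
 * next = [hole_end, old_end). A piece may skip its rebind only if the
 * boundary is aligned to the VMA's max PTE size and the VMA is not
 * userptr (userptr creates a new SG mapping, forcing a rebind).
 */
static void plan_remap(struct remap_plan *op,
		       uint64_t old_start, uint64_t old_end,
		       uint64_t hole_start, uint64_t hole_end,
		       uint64_t max_pte, bool is_userptr)
{
	op->start = old_start;
	op->range = old_end - old_start;

	op->skip_prev = !is_userptr && IS_ALIGNED(hole_start, max_pte);
	if (op->skip_prev) {
		op->range -= hole_start - old_start;
		op->start = hole_start;
	}

	op->skip_next = !is_userptr && IS_ALIGNED(hole_end, max_pte);
	if (op->skip_next)
		op->range -= old_end - hole_end;
}

E.g. with an old VMA at [0, 4M) using 2M pages and a new bind at
[2M, 3M): hole_start = 2M is 2M-aligned, so prev = [0, 2M) keeps its
PTEs (skip_prev); hole_end = 3M is not, so next = [3M, 4M) is rebound;
only [2M, 4M) is unmapped.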
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 851ea7c01b91..bbf37e5c8abb 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -668,6 +668,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (!is_null)
xe_res_next(curs, next - addr);
xe_walk->va_curs_start = next;
+ xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
*action = ACTION_CONTINUE;
return ret;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 87907a97a8c3..19ce6b0c6564 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2384,6 +2384,16 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
return vma;
}
+static u64 xe_vma_max_pte_size(struct xe_vma *vma)
+{
+ if (vma->gpuva.flags & XE_VMA_PTE_1G)
+ return SZ_1G;
+ else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+ return SZ_2M;
+
+ return SZ_4K;
+}
+
/*
* Parse operations list and create any resources needed for the operations
* prior to fully committing to the operations. This setup can fail.
@@ -2460,6 +2470,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
break;
}
case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *old =
+ gpuva_to_vma(op->base.remap.unmap->va);
+
+ op->remap.start = xe_vma_start(old);
+ op->remap.range = xe_vma_size(old);
+
if (op->base.remap.prev) {
struct xe_vma *vma;
bool read_only =
@@ -2478,6 +2495,20 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
}
op->remap.prev = vma;
+
+ /*
+ * Userptr creates a new SG mapping so
+ * we must also rebind.
+ */
+ op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_end(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_prev) {
+ op->remap.range -=
+ xe_vma_end(vma) -
+ xe_vma_start(old);
+ op->remap.start = xe_vma_end(vma);
+ }
}
if (op->base.remap.next) {
@@ -2499,14 +2530,21 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
}
op->remap.next = vma;
- }
- /* XXX: Support no doing remaps */
- op->remap.start =
- xe_vma_start(gpuva_to_vma(op->base.remap.unmap->va));
- op->remap.range =
- xe_vma_size(gpuva_to_vma(op->base.remap.unmap->va));
+ /*
+ * Userptr creates a new SG mapping so
+ * we must also rebind.
+ */
+ op->remap.skip_next = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_start(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_next)
+ op->remap.range -=
+ xe_vma_end(old) -
+ xe_vma_start(vma);
+ }
break;
+ }
case DRM_GPUVA_OP_UNMAP:
case DRM_GPUVA_OP_PREFETCH:
/* Nothing to do */
@@ -2549,10 +2587,23 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
case DRM_GPUVA_OP_REMAP:
prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
true);
- if (op->remap.prev)
+
+ if (op->remap.prev) {
err |= xe_vm_insert_vma(vm, op->remap.prev);
- if (op->remap.next)
+ if (!err && op->remap.skip_prev)
+ op->remap.prev = NULL;
+ }
+ if (op->remap.next) {
err |= xe_vm_insert_vma(vm, op->remap.next);
+ if (!err && op->remap.skip_next)
+ op->remap.next = NULL;
+ }
+
+ /* Adjust for partial unbind after removing VMA from VM */
+ if (!err) {
+ op->base.remap.unmap->va->va.addr = op->remap.start;
+ op->base.remap.unmap->va->va.range = op->remap.range;
+ }
break;
case DRM_GPUVA_OP_UNMAP:
prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
@@ -2622,9 +2673,10 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
bool next = !!op->remap.next;
if (!op->remap.unmap_done) {
- vm->async_ops.munmap_rebind_inflight = true;
- if (prev || next)
+ if (prev || next) {
+ vm->async_ops.munmap_rebind_inflight = true;
vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
+ }
err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
op->num_syncs,
!prev && !next ? op->fence : NULL,
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index dbe6aed6d2cf..29ec04fa1e26 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -29,6 +29,9 @@ struct xe_vm;
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
struct xe_vma {
/** @gpuva: Base GPUVA object */
@@ -336,6 +339,10 @@ struct xe_vma_op_remap {
u64 start;
/** @range: range of the VMA unmap */
u64 range;
+ /** @skip_prev: skip prev rebind */
+ bool skip_prev;
+ /** @skip_next: skip next rebind */
+ bool skip_next;
/** @unmap_done: unmap operation is done */
bool unmap_done;
};
--
2.34.1