[Intel-xe] [PATCH v3 9/9] drm/xe: Avoid doing rebinds
Matthew Brost
matthew.brost at intel.com
Thu Jun 29 20:10:46 UTC 2023
If we don't change page sizes, we can avoid doing rebinds and instead
just do a partial unbind. The algorithm to determine a VMA's page size
is greedy: we assume all pages in the removed VMA use the largest page
size found anywhere in that VMA.
v2: Don't exceed 100 lines
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_pt.c | 1 +
drivers/gpu/drm/xe/xe_vm.c | 73 +++++++++++++++++++++++++-------
drivers/gpu/drm/xe/xe_vm_types.h | 17 +++-----
3 files changed, 66 insertions(+), 25 deletions(-)
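Before the diff, a minimal stand-alone sketch of the greedy page-size
bookkeeping described above. All names here (record_pte_level,
max_pte_size, the PTE_* bits) are hypothetical stand-ins for the
XE_VMA_PTE_* flags and xe_vma_max_pte_size() added by this patch; it is
plain userspace C, not driver code.

#include <stdint.h>
#include <stdio.h>

#define SZ_4K (1ull << 12)
#define SZ_2M (1ull << 21)
#define SZ_1G (1ull << 30)

/* One bit per page-table level, mirroring XE_VMA_PTE_4K/2M/1G. */
#define PTE_4K (1u << 0)	/* level 0 */
#define PTE_2M (1u << 1)	/* level 1 */
#define PTE_1G (1u << 2)	/* level 2 */

/* During the bind walk, each entry written at page-table level
 * 'level' records its size class on the VMA's flags. */
static unsigned int record_pte_level(unsigned int flags, unsigned int level)
{
	return flags | (PTE_4K << level);
}

/* Greedy read-back: assume the whole VMA uses the largest page
 * size seen anywhere in it. */
static uint64_t max_pte_size(unsigned int flags)
{
	if (flags & PTE_1G)
		return SZ_1G;
	else if (flags & PTE_2M)
		return SZ_2M;
	return SZ_4K;
}

int main(void)
{
	unsigned int flags = 0;

	flags = record_pte_level(flags, 0);	/* some 4K entries... */
	flags = record_pte_level(flags, 1);	/* ...and one 2M entry */

	/* Greedy answer is 2M even though most pages may be 4K. */
	printf("max pte size: 0x%llx\n",
	       (unsigned long long)max_pte_size(flags));
	return 0;
}

The greediness is conservative: a single large entry anywhere in the
VMA makes the alignment check stricter everywhere, which at worst
skips the optimization rather than producing an incorrect partial
unbind.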
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 00855681c0d5..a8d96cbd53e3 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -668,6 +668,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (!is_null)
xe_res_next(curs, next - addr);
xe_walk->va_curs_start = next;
+ xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
*action = ACTION_CONTINUE;
return ret;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 27001b3355a0..925c44ee4115 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2384,6 +2384,16 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
return vma;
}
+static u64 xe_vma_max_pte_size(struct xe_vma *vma)
+{
+ if (vma->gpuva.flags & XE_VMA_PTE_1G)
+ return SZ_1G;
+ else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+ return SZ_2M;
+
+ return SZ_4K;
+}
+
/*
* Parse operations list and create any resources needed for the operations
* prior to fully committing to the operations. This setup can fail.
@@ -2460,6 +2470,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
break;
}
case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *old =
+ gpuva_to_vma(op->base.remap.unmap->va);
+
+ op->remap.start = xe_vma_start(old);
+ op->remap.range = xe_vma_size(old);
+
if (op->base.remap.prev) {
struct xe_vma *vma;
bool read_only =
@@ -2478,6 +2495,21 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
}
op->remap.prev = vma;
+
+ /*
+			 * XXX: Not sure why userptr doesn't
+			 * work here, but it really shouldn't
+			 * be a use case anyway.
+ */
+ op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_end(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_prev) {
+ op->remap.range -=
+ xe_vma_end(vma) -
+ xe_vma_start(old);
+ op->remap.start = xe_vma_end(vma);
+ }
}
if (op->base.remap.next) {
@@ -2499,20 +2531,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
}
op->remap.next = vma;
+ op->remap.skip_next = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_start(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_next)
+ op->remap.range -=
+ xe_vma_end(old) -
+ xe_vma_start(vma);
}
-
- /* XXX: Support no doing remaps */
- op->remap.start =
- xe_vma_start(gpuva_to_vma(op->base.remap.unmap->va));
- op->remap.range =
- xe_vma_size(gpuva_to_vma(op->base.remap.unmap->va));
break;
+ }
case DRM_GPUVA_OP_UNMAP:
- op->unmap.start =
- xe_vma_start(gpuva_to_vma(op->base.unmap.va));
- op->unmap.range =
- xe_vma_size(gpuva_to_vma(op->base.unmap.va));
- break;
case DRM_GPUVA_OP_PREFETCH:
/* Nothing to do */
break;
@@ -2554,10 +2583,23 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
case DRM_GPUVA_OP_REMAP:
prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
true);
- if (op->remap.prev)
+
+ if (op->remap.prev) {
err |= xe_vm_insert_vma(vm, op->remap.prev);
- if (op->remap.next)
+ if (!err && op->remap.skip_prev)
+ op->remap.prev = NULL;
+ }
+ if (op->remap.next) {
err |= xe_vm_insert_vma(vm, op->remap.next);
+ if (!err && op->remap.skip_next)
+ op->remap.next = NULL;
+ }
+
+		/* Adjust for partial unbind after removing VMA from VM */
+ if (!err) {
+ op->base.remap.unmap->va->va.addr = op->remap.start;
+ op->base.remap.unmap->va->va.range = op->remap.range;
+ }
break;
case DRM_GPUVA_OP_UNMAP:
prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
@@ -2627,9 +2669,10 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
bool next = !!op->remap.next;
if (!op->remap.unmap_done) {
- vm->async_ops.munmap_rebind_inflight = true;
- if (prev || next)
+ if (prev || next) {
+ vm->async_ops.munmap_rebind_inflight = true;
vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
+ }
err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
op->num_syncs,
!prev && !next ? op->fence : NULL,
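For the hunks above, a self-contained sketch of the remap-skip
arithmetic. compute_remap and its parameters are hypothetical names
for illustration only, and the real code additionally requires
!xe_vma_is_userptr(old) before either skip.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Old VMA covers [start, start + range); a remap keeps a prev piece
 * ending at prev_end and a next piece starting at next_start. Each
 * piece can skip its rebind iff its boundary is aligned to the old
 * VMA's max PTE size, in which case the unbind shrinks to just the
 * removed middle. */
struct remap {
	uint64_t start, range;
	bool skip_prev, skip_next;
};

static struct remap compute_remap(uint64_t old_start, uint64_t old_range,
				  uint64_t prev_end, uint64_t next_start,
				  uint64_t max_pte_size)
{
	struct remap r = { .start = old_start, .range = old_range };

	r.skip_prev = IS_ALIGNED(prev_end, max_pte_size);
	if (r.skip_prev) {
		r.range -= prev_end - old_start;
		r.start = prev_end;
	}

	r.skip_next = IS_ALIGNED(next_start, max_pte_size);
	if (r.skip_next)
		r.range -= (old_start + old_range) - next_start;

	return r;
}

int main(void)
{
	/* 2M-page VMA [0, 8M) with [2M, 6M) unmapped: both boundaries
	 * are 2M-aligned, so prev and next keep their bindings and
	 * only [2M, 6M) is unbound. */
	struct remap r = compute_remap(0, 8ull << 20, 2ull << 20,
				       6ull << 20, 2ull << 20);

	printf("unbind [%#llx, %#llx), skip_prev=%d skip_next=%d\n",
	       (unsigned long long)r.start,
	       (unsigned long long)(r.start + r.range),
	       r.skip_prev, r.skip_next);
	return 0;
}

If either boundary falls mid-way through a large page, that side fails
the alignment check and gets rebound as before, so the fast path only
ever replaces work that would have been a no-op rebind.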
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index cad806bafbfc..b517b711929c 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -29,6 +29,9 @@ struct xe_vm;
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
struct xe_vma {
/** @gpuva: Base GPUVA object */
@@ -323,14 +326,6 @@ struct xe_vma_op_map {
bool is_null;
};
-/** struct xe_vma_op_unmap - VMA unmap operation */
-struct xe_vma_op_unmap {
- /** @start: start of the VMA unmap */
- u64 start;
- /** @range: range of the VMA unmap */
- u64 range;
-};
-
/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
/** @prev: VMA preceding part of a split mapping */
@@ -341,6 +336,10 @@ struct xe_vma_op_remap {
u64 start;
/** @range: range of the VMA unmap */
u64 range;
+ /** @skip_prev: skip prev rebind */
+ bool skip_prev;
+ /** @skip_next: skip next rebind */
+ bool skip_next;
/** @unmap_done: unmap operation in done */
bool unmap_done;
};
@@ -398,8 +397,6 @@ struct xe_vma_op {
union {
/** @map: VMA map operation specific data */
struct xe_vma_op_map map;
- /** @unmap: VMA unmap operation specific data */
- struct xe_vma_op_unmap unmap;
/** @remap: VMA remap operation specific data */
struct xe_vma_op_remap remap;
/** @prefetch: VMA prefetch operation specific data */
--
2.34.1