[PATCH 2/4] drm/xe: Annotate each dumpable vma as such
Maarten Lankhorst
maarten.lankhorst at linux.intel.com
Wed Jan 24 16:52:43 UTC 2024
In preparation for snapshot dumping, mark each dumpable VMA as such, so
we can walk over the VM later and dump it.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
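
A rough sketch (not part of this patch) of how the later snapshot
dumping step could consume the annotation: walk the VM's GPU VA space
and only capture VMAs that carry XE_VMA_DUMPABLE. drm_gpuvm_for_each_va()
and gpuva_to_vma() already exist; the function name and the capture step
below are placeholders.

/* Sketch only: walk the VM and pick up the VMAs marked dumpable. */
static void xe_vm_capture_dumpable_vmas(struct xe_vm *vm)
{
	struct drm_gpuva *gpuva;

	/* Caller is expected to hold vm->lock so the VA tree is stable. */
	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
		struct xe_vma *vma = gpuva_to_vma(gpuva);

		if (!(vma->gpuva.flags & XE_VMA_DUMPABLE))
			continue;

		/*
		 * Capture [xe_vma_start(vma), xe_vma_end(vma)) into the
		 * devcoredump here.
		 */
	}
}
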
drivers/gpu/drm/xe/xe_vm.c | 13 +++++++++++++
drivers/gpu/drm/xe/xe_vm_types.h | 3 +++
2 files changed, 16 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 66b7d6124a37..0c2540971b17 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -784,6 +784,7 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
@@ -796,6 +797,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
u8 id;
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
+ bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -820,6 +822,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.flags |= XE_VMA_READ_ONLY;
if (is_null)
vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+ if (dumpable)
+ vma->gpuva.flags |= XE_VMA_DUMPABLE;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -2104,6 +2108,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
@@ -2291,6 +2296,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->map.dumpable ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, &op->base.map, op->map.pat_index,
flags);
@@ -2315,6 +2322,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.prev,
old->pat_index, flags);
@@ -2346,6 +2356,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.next,
old->pat_index, flags);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 63e8a50b88e9..2877f44bef7d 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -32,6 +32,7 @@ struct xe_vm;
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -291,6 +292,8 @@ struct xe_vma_op_map {
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
+ /** @dumpable: whether BO is dumped on GPU hang */
+ bool dumpable;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
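
Not part of this patch, but a follow-up could add a small helper next to
xe_vma_read_only() in xe_vm.h so dump code does not open-code the flag
check; the helper name here is made up for illustration.

static inline bool xe_vma_dumpable(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_DUMPABLE;
}
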
--
2.43.0