[Intel-xe] [PATCH v2 21/31] drm/gpuva: Add drm device to GPUVA manager
Rodrigo Vivi
rodrigo.vivi at kernel.org
Fri May 5 19:39:40 UTC 2023
On Mon, May 01, 2023 at 05:17:17PM -0700, Matthew Brost wrote:
> This is the logical place for this, will help with upcoming changes too.
Please split the xe changes from the drm changes into separate patches,
and a few more words on why this is needed would be better.
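
For the "why" part, I believe the point is that any driver which embeds
struct drm_device can now get back to its device from the manager itself
and drop its private back-pointer, which is what the xe_vm_device() helper
below does. A minimal sketch (my_device/my_vm are made-up names, not from
this patch):

#include <linux/container_of.h>
#include <drm/drm_device.h>
#include <drm/drm_gpuva_mgr.h>

struct my_device {
	struct drm_device drm;		/* drm_device embedded in the driver device */
};

struct my_vm {
	struct drm_gpuva_manager mgr;	/* per-VA-space manager, now carries the drm pointer */
	/* no driver-private device back-pointer needed anymore */
};

static inline struct my_device *my_vm_device(struct my_vm *vm)
{
	/* recover the driver device from the drm_device stored by the manager */
	return container_of(vm->mgr.drm, struct my_device, drm);
}

Spelling that out in the commit message would make the motivation clearer.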
>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/drm_gpuva_mgr.c | 3 +++
> drivers/gpu/drm/xe/xe_migrate.c | 10 +++++-----
> drivers/gpu/drm/xe/xe_pt.c | 18 +++++++++---------
> drivers/gpu/drm/xe/xe_vm.c | 31 +++++++++++++++----------------
> drivers/gpu/drm/xe/xe_vm.h | 10 ++++++++++
> drivers/gpu/drm/xe/xe_vm_types.h | 2 --
> include/drm/drm_gpuva_mgr.h | 4 ++++
> 7 files changed, 46 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gpuva_mgr.c b/drivers/gpu/drm/drm_gpuva_mgr.c
> index bd7d27ee44bb..137322945e91 100644
> --- a/drivers/gpu/drm/drm_gpuva_mgr.c
> +++ b/drivers/gpu/drm/drm_gpuva_mgr.c
> @@ -413,6 +413,7 @@ static void __drm_gpuva_remove(struct drm_gpuva *va);
> /**
> * drm_gpuva_manager_init - initialize a &drm_gpuva_manager
> * @mgr: pointer to the &drm_gpuva_manager to initialize
> + * @drm: drm device
> * @name: the name of the GPU VA space
> * @start_offset: the start offset of the GPU VA space
> * @range: the size of the GPU VA space
> @@ -427,6 +428,7 @@ static void __drm_gpuva_remove(struct drm_gpuva *va);
> */
> void
> drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
> + struct drm_device *drm,
> const char *name,
> u64 start_offset, u64 range,
> u64 reserve_offset, u64 reserve_range,
> @@ -437,6 +439,7 @@ drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
> mgr->mm_start = start_offset;
> mgr->mm_range = range;
>
> + mgr->drm = drm;
> mgr->name = name ? name : "unknown";
> mgr->ops = ops;
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index b44aa094a466..0a393c5772e5 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -129,7 +129,7 @@ static u64 xe_migrate_vram_ofs(u64 addr)
> static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
> {
> struct xe_gt *gt = m->gt;
> - struct xe_device *xe = vm->xe;
> + struct xe_device *xe = xe_vm_device(vm);
> size_t cleared_size;
> u64 vram_addr;
> bool is_vram;
> @@ -175,7 +175,7 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
> /* Need to be sure everything fits in the first PT, or create more */
> XE_BUG_ON(m->batch_base_ofs + batch->size >= SZ_2M);
>
> - bo = xe_bo_create_pin_map(vm->xe, m->gt, vm,
> + bo = xe_bo_create_pin_map(xe_vm_device(vm), m->gt, vm,
> num_entries * XE_PAGE_SIZE,
> ttm_bo_type_kernel,
> XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
> @@ -1051,7 +1051,7 @@ xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
>
> if (wait_vm && !dma_resv_test_signaled(&vm->resv,
> DMA_RESV_USAGE_BOOKKEEP)) {
> - vm_dbg(&vm->xe->drm, "wait on VM for munmap");
> + vm_dbg(&xe_vm_device(vm)->drm, "wait on VM for munmap");
> return ERR_PTR(-ETIME);
> }
>
> @@ -1069,7 +1069,7 @@ xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
>
> if (vm) {
> trace_xe_vm_cpu_bind(vm);
> - xe_device_wmb(vm->xe);
> + xe_device_wmb(xe_vm_device(vm));
> }
>
> fence = dma_fence_get_stub();
> @@ -1263,7 +1263,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> * trigger preempts before moving forward
> */
> if (first_munmap_rebind) {
> - vm_dbg(&vm->xe->drm, "wait on first_munmap_rebind");
> + vm_dbg(&xe_vm_device(vm)->drm, "wait on first_munmap_rebind");
> err = job_add_deps(job, &vm->resv,
> DMA_RESV_USAGE_BOOKKEEP);
> if (err)
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index 8eab8e1bbaf0..4167f666d98d 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -218,7 +218,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_gt *gt,
> if (!pt)
> return ERR_PTR(-ENOMEM);
>
> - bo = xe_bo_create_pin_map(vm->xe, gt, vm, SZ_4K,
> + bo = xe_bo_create_pin_map(xe_vm_device(vm), gt, vm, SZ_4K,
> ttm_bo_type_kernel,
> XE_BO_CREATE_VRAM_IF_DGFX(gt) |
> XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
> @@ -264,11 +264,11 @@ void xe_pt_populate_empty(struct xe_gt *gt, struct xe_vm *vm,
> * FIXME: Some memory is allocated already allocated to zero?
> * Find out which memory that is and avoid this memset...
> */
> - xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
> + xe_map_memset(xe_vm_device(vm), map, 0, 0, SZ_4K);
> } else {
> empty = __xe_pt_empty_pte(gt, vm, pt->level);
> for (i = 0; i < XE_PDES; i++)
> - xe_pt_write(vm->xe, map, i, empty);
> + xe_pt_write(xe_vm_device(vm), map, i, empty);
> }
> }
>
> @@ -355,7 +355,7 @@ int xe_pt_create_scratch(struct xe_device *xe, struct xe_gt *gt,
> if (IS_ERR(vm->scratch_bo[id]))
> return PTR_ERR(vm->scratch_bo[id]);
>
> - xe_map_memset(vm->xe, &vm->scratch_bo[id]->vmap, 0, 0,
> + xe_map_memset(xe_vm_device(vm), &vm->scratch_bo[id]->vmap, 0, 0,
> vm->scratch_bo[id]->size);
>
> for (i = 0; i < vm->pt_root[id]->level; i++) {
> @@ -538,7 +538,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
> if (unlikely(xe_child))
> parent->drm.dir->entries[offset] = &xe_child->drm;
>
> - xe_pt_write(xe_walk->vm->xe, map, offset, pte);
> + xe_pt_write(xe_vm_device(xe_walk->vm), map, offset, pte);
> parent->num_live++;
> } else {
> /* Shared pt. Stage update. */
> @@ -1337,7 +1337,7 @@ __xe_pt_bind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
> xe_vm_assert_held(vm);
> XE_BUG_ON(xe_gt_is_media_type(gt));
>
> - vm_dbg(&xe_vma_vm(vma)->xe->drm,
> + vm_dbg(&xe_vma_device(vma)->drm,
> "Preparing bind, with range [%llx...%llx) engine %p.\n",
> xe_vma_start(vma), xe_vma_end(vma) - 1, e);
>
> @@ -1366,7 +1366,7 @@ __xe_pt_bind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
>
>
> if (last_munmap_rebind)
> - vm_dbg(&vm->xe->drm, "last_munmap_rebind");
> + vm_dbg(&xe_vm_device(vm)->drm, "last_munmap_rebind");
>
> /* TLB invalidation must be done before signaling rebind */
> if (rebind && !xe_vm_no_dma_fences(xe_vma_vm(vma))) {
> @@ -1401,7 +1401,7 @@ __xe_pt_bind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
> xe_bo_put_commit(&deferred);
> }
> if (!rebind && last_munmap_rebind && xe_vm_in_compute_mode(vm))
> - queue_work(vm->xe->ordered_wq,
> + queue_work(xe_vm_device(vm)->ordered_wq,
> &vm->preempt.rebind_work);
> } else {
> kfree(ifence);
> @@ -1664,7 +1664,7 @@ __xe_pt_unbind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
> xe_vm_assert_held(vm);
> XE_BUG_ON(xe_gt_is_media_type(gt));
>
> - vm_dbg(&xe_vma_vm(vma)->xe->drm,
> + vm_dbg(&xe_vma_device(vma)->drm,
> "Preparing unbind, with range [%llx...%llx) engine %p.\n",
> xe_vma_start(vma), xe_vma_end(vma) - 1, e);
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index e8d9939ee535..688130c509a4 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -50,7 +50,7 @@ int xe_vma_userptr_check_repin(struct xe_vma *vma)
> int xe_vma_userptr_pin_pages(struct xe_vma *vma)
> {
> struct xe_vm *vm = xe_vma_vm(vma);
> - struct xe_device *xe = vm->xe;
> + struct xe_device *xe = xe_vm_device(vm);
> const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
> struct page **pages;
> bool in_kthread = !current->mm;
> @@ -852,12 +852,12 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
> if (gt_mask) {
> vma->gt_mask = gt_mask;
> } else {
> - for_each_gt(gt, vm->xe, id)
> + for_each_gt(gt, xe_vm_device(vm), id)
> if (!xe_gt_is_media_type(gt))
> vma->gt_mask |= 0x1 << id;
> }
>
> - if (vm->xe->info.platform == XE_PVC)
> + if (xe_vm_device(vm)->info.platform == XE_PVC)
> vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
>
> if (bo) {
> @@ -904,7 +904,7 @@ static void vm_remove_extobj(struct xe_vma *vma)
> static void xe_vma_destroy_late(struct xe_vma *vma)
> {
> struct xe_vm *vm = xe_vma_vm(vma);
> - struct xe_device *xe = vm->xe;
> + struct xe_device *xe = xe_vm_device(vm);
> bool read_only = xe_vma_read_only(vma);
>
> if (xe_vma_is_userptr(vma)) {
> @@ -1084,7 +1084,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> if (!vm)
> return ERR_PTR(-ENOMEM);
>
> - vm->xe = xe;
> kref_init(&vm->refcount);
> dma_resv_init(&vm->resv);
>
> @@ -1125,7 +1124,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> if (err)
> goto err_put;
>
> - drm_gpuva_manager_init(&vm->mgr, "Xe VM", 0, vm->size, 0, 0,
> + drm_gpuva_manager_init(&vm->mgr, &xe->drm, "Xe VM", 0, vm->size, 0, 0,
> &gpuva_ops);
> if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
> vm->flags |= XE_VM_FLAGS_64K;
> @@ -1284,7 +1283,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
> {
> struct list_head contested;
> struct ww_acquire_ctx ww;
> - struct xe_device *xe = vm->xe;
> + struct xe_device *xe = xe_vm_device(vm);
> struct xe_gt *gt;
> struct xe_vma *vma, *next_vma;
> struct drm_gpuva *gpuva;
> @@ -1387,7 +1386,7 @@ static void vm_destroy_work_func(struct work_struct *w)
> struct xe_vm *vm =
> container_of(w, struct xe_vm, destroy_work);
> struct ww_acquire_ctx ww;
> - struct xe_device *xe = vm->xe;
> + struct xe_device *xe = xe_vm_device(vm);
> struct xe_gt *gt;
> u8 id;
> void *lookup;
> @@ -1481,7 +1480,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
> return ERR_PTR(-ENOMEM);
> }
>
> - for_each_gt(gt, vm->xe, id) {
> + for_each_gt(gt, xe_vm_device(vm), id) {
> if (!(vma->gt_present & BIT(id)))
> goto next;
>
> @@ -1555,7 +1554,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
> return ERR_PTR(-ENOMEM);
> }
>
> - for_each_gt(gt, vm->xe, id) {
> + for_each_gt(gt, xe_vm_device(vm), id) {
> if (!(vma->gt_mask & BIT(id)))
> goto next;
>
> @@ -2061,7 +2060,7 @@ static int vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
> static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
> u64 addr, u64 range, u32 op)
> {
> - struct xe_device *xe = vm->xe;
> + struct xe_device *xe = xe_vm_device(vm);
> struct xe_vma *vma;
> bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
>
> @@ -2164,7 +2163,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>
> lockdep_assert_held_write(&vm->lock);
>
> - vm_dbg(&vm->xe->drm,
> + vm_dbg(&xe_vm_device(vm)->drm,
> "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
> VM_BIND_OP(operation), addr, range, bo_offset_or_userptr);
>
> @@ -2232,7 +2231,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>
> if (!IS_ERR(ops))
> drm_gpuva_for_each_op(__op, ops)
> - print_op(vm->xe, __op);
> + print_op(xe_vm_device(vm), __op);
>
> return ops;
> }
> @@ -2783,7 +2782,7 @@ static void xe_vma_op_work_func(struct work_struct *w)
> down_write(&vm->lock);
> err = xe_vma_op_execute(vm, op);
> if (err) {
> - drm_warn(&vm->xe->drm,
> + drm_warn(&xe_vm_device(vm)->drm,
> "Async VM op(%d) failed with %d",
> op->base.op, err);
> vm_set_async_error(vm, err);
> @@ -3103,7 +3102,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>
> /* Rebinds may have been blocked, give worker a kick */
> if (xe_vm_in_compute_mode(vm))
> - queue_work(vm->xe->ordered_wq,
> + queue_work(xe_vm_device(vm)->ordered_wq,
> &vm->preempt.rebind_work);
> }
>
> @@ -3315,7 +3314,7 @@ void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
> */
> int xe_vm_invalidate_vma(struct xe_vma *vma)
> {
> - struct xe_device *xe = xe_vma_vm(vma)->xe;
> + struct xe_device *xe = xe_vm_device(xe_vma_vm(vma));
> struct xe_gt *gt;
> u32 gt_needs_invalidate = 0;
> int seqno[XE_MAX_GT];
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 96e2c6b07bf8..cbbe95d6291f 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -52,6 +52,11 @@ static inline bool xe_vm_is_closed(struct xe_vm *vm)
> struct xe_vma *
> xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
>
> +static inline struct xe_device *xe_vm_device(struct xe_vm *vm)
> +{
> + return container_of(vm->mgr.drm, struct xe_device, drm);
> +}
> +
> static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
> {
> return container_of(gpuva->mgr, struct xe_vm, mgr);
> @@ -102,6 +107,11 @@ static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
> return container_of(vma->gpuva.mgr, struct xe_vm, mgr);
> }
>
> +static inline struct xe_device *xe_vma_device(struct xe_vma *vma)
> +{
> + return xe_vm_device(xe_vma_vm(vma));
> +}
> +
> static inline bool xe_vma_read_only(struct xe_vma *vma)
> {
> return vma->gpuva.flags & XE_VMA_READ_ONLY;
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index df4797ec4d7f..fca42910dcae 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -137,8 +137,6 @@ struct xe_vm {
> /** @mgr: base GPUVA used to track VMAs */
> struct drm_gpuva_manager mgr;
>
> - struct xe_device *xe;
> -
> struct kref refcount;
>
> /* engine used for (un)binding vma's */
> diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuva_mgr.h
> index 62169d850098..55b0acfdcc44 100644
> --- a/include/drm/drm_gpuva_mgr.h
> +++ b/include/drm/drm_gpuva_mgr.h
> @@ -169,6 +169,9 @@ static inline bool drm_gpuva_evicted(struct drm_gpuva *va)
> * There should be one manager instance per GPU virtual address space.
> */
> struct drm_gpuva_manager {
> + /** @drm: drm device */
> + struct drm_device *drm;
> +
> /**
> * @name: the name of the DRM GPU VA space
> */
> @@ -204,6 +207,7 @@ struct drm_gpuva_manager {
> };
>
> void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
> + struct drm_device *drm,
> const char *name,
> u64 start_offset, u64 range,
> u64 reserve_offset, u64 reserve_range,
> --
> 2.34.1
>
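
One more small note: with the new parameter, the driver-side init ends up
looking like the xe_vm_create() hunk above, roughly (the comments are mine,
guessing at the meaning of the zero arguments from the parameter names in
the prototype):

	drm_gpuva_manager_init(&vm->mgr, &xe->drm, "Xe VM",
			       0, vm->size,	/* VA space start offset and range */
			       0, 0,		/* no reserved region */
			       &gpuva_ops);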