[Intel-xe] [PATCH v2] drm/xe: Reinstate pipelined fence enable_signaling
Thomas Hellström
thomas.hellstrom at linux.intel.com
Fri Sep 15 16:40:57 UTC 2023
With the GPUVA conversion, the xe_bo::vmas member was replaced with
drm_gem_object::gpuva.list, but a couple of usage instances were left
using the old member, most notably the pipelined fence
enable_signaling.

Remove the xe_bo::vmas member completely, fix the remaining usage
instances and also enable the pipelined fence enable_signaling even
for faulting VMs, since we actually wait for bind fences to complete.
v2:
- Rebase.
Cc: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_bo.c | 5 ++---
drivers/gpu/drm/xe/xe_bo_types.h | 2 --
drivers/gpu/drm/xe/xe_pt.c | 2 +-
3 files changed, 3 insertions(+), 6 deletions(-)
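
For reference, the check restored in xe_bo_trigger_rebind() boils down
to the following (a minimal sketch, not the upstream code as-is: the
standalone helper and its name are hypothetical, and the tail of the
loop, the dma_fence_enable_sw_signaling() call and dma_resv_iter_end(),
falls outside the quoted hunk below and is assumed):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/*
 * Sketch: force software signaling on all bookkeep fences of a BO's
 * reservation object, so that pipelined bind fences make progress and
 * can be waited on. Assumes the xe-internal struct xe_bo definition.
 */
static void xe_bo_kick_bookkeep_fences(struct xe_bo *bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Only BOs with GPU VA mappings have bind fences to kick. */
	if (list_empty(&bo->ttm.base.gpuva.list))
		return;

	dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);
}

Note that the condition now keys off drm_gem_object::gpuva.list and no
longer skips faulting-mode devices, matching the commit message's point
that bind fences are actually waited on and therefore need signaling
enabled in that mode too.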
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 27726d4f3423..c5e4d04c4d58 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -455,7 +455,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 
 	dma_resv_assert_held(bo->ttm.base.resv);
 
-	if (!xe_device_in_fault_mode(xe) && !list_empty(&bo->vmas)) {
+	if (!list_empty(&bo->ttm.base.gpuva.list)) {
 		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
 				    DMA_RESV_USAGE_BOOKKEEP);
 		dma_resv_for_each_fence_unlocked(&cursor, fence)
@@ -1046,7 +1046,7 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
 		drm_prime_gem_destroy(&bo->ttm.base, NULL);
 	drm_gem_object_release(&bo->ttm.base);
 
-	xe_assert(xe, list_empty(&bo->vmas));
+	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
 
 	if (bo->ggtt_node.size)
 		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
@@ -1229,7 +1229,6 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
 	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
 	bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
-	INIT_LIST_HEAD(&bo->vmas);
 	INIT_LIST_HEAD(&bo->pinned_link);
 
 	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 2ea9ad423170..946427fd3fe8 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -31,8 +31,6 @@ struct xe_bo {
 	struct xe_vm *vm;
 	/** @tile: Tile this BO is attached to (kernel BO only) */
 	struct xe_tile *tile;
-	/** @vmas: List of VMAs for this BO */
-	struct list_head vmas;
 	/** @placements: valid placements for this BO */
 	struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
 	/** @placement: current placement for this BO */
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index d1e06c913260..ce8d9e9d1b61 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -265,7 +265,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
 	if (!pt)
 		return;
 
-	XE_WARN_ON(!list_empty(&pt->bo->vmas));
+	XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
 	xe_bo_unpin(pt->bo);
 	xe_bo_put_deferred(pt->bo, deferred);
--
2.41.0