[Intel-xe] [PATCH v2 1/2] Revert "drm/xe: Pad GGTT mapping with an extra page pointing to scratch"
Niranjana Vishwanathapura
niranjana.vishwanathapura at intel.com
Fri Apr 7 03:27:50 UTC 2023
This reverts commit 70e94be2729e5c725c53b1aa2d10c073566bb1ca.
Resolve a minor conflict by retaining the XE_BO_CREATE_VRAM_MASK flag.

Coalescing GGTT invalidations causes a number of hangs during driver
load and in user space. The benefit is rather small, so revert it to
stabilize the stack.
Tested-by: Matt Roper <matthew.d.roper at intel.com> # ADL-P
Reviewed-by: Lucas De Marchi <lucas.demarchi at intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
---
drivers/gpu/drm/xe/xe_bo.c | 1 -
drivers/gpu/drm/xe/xe_bo.h | 1 +
drivers/gpu/drm/xe/xe_bo_types.h | 4 +---
drivers/gpu/drm/xe/xe_ggtt.c | 30 +++++++++---------------------
4 files changed, 11 insertions(+), 25 deletions(-)
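
To illustrate what this revert trades away, here is a minimal standalone
sketch of the two invalidation strategies (userspace C with made-up names
such as fake_ggtt and map_bo_immediate; not the actual driver code):

/*
 * Coalesced: set a flag per mapping and issue one TLB invalidation
 * later (what the reverted commit enabled). Immediate: invalidate on
 * every mapping (what integrated parts fall back to after this revert).
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_ggtt {
	bool invalidate;	/* deferred-invalidation flag */
	int tlb_flushes;	/* invalidations actually issued */
};

/* Coalesced path: just mark the GGTT dirty. */
static void map_bo_coalesced(struct fake_ggtt *ggtt)
{
	/* ... PTE writes would go here ... */
	ggtt->invalidate = true;
}

/* One flush covers all previously coalesced mappings. */
static void flush_if_needed(struct fake_ggtt *ggtt)
{
	if (ggtt->invalidate) {
		ggtt->tlb_flushes++;
		ggtt->invalidate = false;
	}
}

/* Immediate path: flush after every single mapping. */
static void map_bo_immediate(struct fake_ggtt *ggtt)
{
	/* ... PTE writes would go here ... */
	ggtt->tlb_flushes++;
}

int main(void)
{
	struct fake_ggtt coalesced = {0}, immediate = {0};
	int i;

	for (i = 0; i < 4; i++)
		map_bo_coalesced(&coalesced);
	flush_if_needed(&coalesced);

	for (i = 0; i < 4; i++)
		map_bo_immediate(&immediate);

	/* Prints "coalesced: 1 flush(es), immediate: 4 flush(es)" */
	printf("coalesced: %d flush(es), immediate: %d flush(es)\n",
	       coalesced.tlb_flushes, immediate.tlb_flushes);
	return 0;
}

The saving is a handful of TLB invalidations per batch of mappings, which
is the "rather small" benefit weighed against the hangs above.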
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 5460e6fe3c1f..c7f175a149be 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -975,7 +975,6 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return bo;
}
- bo->requested_size = size;
if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
!(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 8f5a7ad10d09..dd58edcb9398 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -213,6 +213,7 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
+ XE_BUG_ON(bo->ggtt_node.size > bo->size);
XE_BUG_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
return bo->ggtt_node.start;
}
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index db217500a970..06de3330211d 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -23,9 +23,7 @@ struct xe_vm;
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
- /** @requested_size: Requested size of this buffer object */
- size_t requested_size;
- /** @size: Size of this buffer object after alignment adjusting */
+ /** @size: Size of this buffer object */
size_t size;
/** @flags: flags for this buffer object */
u32 flags;
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index a430d1568890..2e31b1ce3e4c 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -263,6 +263,7 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
+ struct xe_device *xe = gt_to_xe(ggtt->gt);
u64 start = bo->ggtt_node.start;
u64 offset, pte;
@@ -273,12 +274,8 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
xe_ggtt_set_pte(ggtt, start + offset, pte);
}
- if (bo->size == bo->requested_size) {
- pte = xe_ggtt_pte_encode(ggtt->scratch ?: bo, 0);
- xe_ggtt_set_pte(ggtt, start + bo->size, pte);
- }
-
- if (ggtt->invalidate) {
+ /* XXX: Without invalidating every time on integrated, driver load fails */
+ if (ggtt->invalidate || !IS_DGFX(xe)) {
xe_ggtt_invalidate(ggtt->gt);
ggtt->invalidate = false;
}
@@ -287,7 +284,6 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
- u64 size = bo->size;
int err;
u64 alignment = GEN8_PAGE_SIZE;
@@ -295,6 +291,8 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
alignment = SZ_64K;
if (XE_WARN_ON(bo->ggtt_node.size)) {
+ /* Someone's already inserted this BO in the GGTT */
+ XE_BUG_ON(bo->ggtt_node.size != bo->size);
return 0;
}
@@ -302,21 +300,8 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (err)
return err;
- /*
- * We add an extra page when mapping a BO in the GGTT so we can coalesce
- * GGTT invalidations. Without this extra page, GGTT prefetches can leave
- * entries in the TLB pointing to an invalid GGTT entry when in fact we
- * have programmed this GGTT entry to a valid entry. BOs aligned to 64k
- * already have padding, so there is no need to add an extra page.
- */
- if (bo->size == bo->requested_size) {
- size += SZ_4K;
- if (end != U64_MAX)
- end += SZ_4K;
- }
-
mutex_lock(&ggtt->lock);
- err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, size,
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
alignment, 0, start, end, 0);
if (!err)
xe_ggtt_map_bo(ggtt, bo);
@@ -354,6 +339,9 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
if (XE_WARN_ON(!bo->ggtt_node.size))
return;
+ /* This BO is not currently in the GGTT */
+ XE_BUG_ON(bo->ggtt_node.size != bo->size);
+
xe_ggtt_remove_node(ggtt, &bo->ggtt_node);
}
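
The removed hunks above implemented the padding scheme; a standalone
sketch of its arithmetic (made-up fake_bo type and padded_node_size
helper, not driver code) may help when reading the revert:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000ULL

/* Stand-ins for bo->requested_size and bo->size in the removed code. */
struct fake_bo {
	uint64_t requested_size;	/* size the caller asked for */
	uint64_t size;			/* size after any 64k alignment */
};

/*
 * GGTT bytes the padded scheme reserved: BOs whose size was not grown
 * by 64k alignment got one extra 4K page, which xe_ggtt_map_bo then
 * pointed at the scratch page so that prefetch past the BO's last page
 * read a valid PTE instead of caching an invalid one.
 */
static uint64_t padded_node_size(const struct fake_bo *bo)
{
	uint64_t size = bo->size;

	if (bo->size == bo->requested_size)
		size += SZ_4K;
	return size;
}

int main(void)
{
	struct fake_bo plain = { .requested_size = 5 * SZ_4K, .size = 5 * SZ_4K };
	struct fake_bo aligned64k = { .requested_size = 5 * SZ_4K, .size = 0x10000 };

	printf("unaligned BO: %llu bytes reserved (extra scratch page)\n",
	       (unsigned long long)padded_node_size(&plain));
	printf("64k-aligned BO: %llu bytes reserved (no extra page)\n",
	       (unsigned long long)padded_node_size(&aligned64k));
	return 0;
}

With the revert, the node size is simply bo->size again, which is what
the new XE_BUG_ON(bo->ggtt_node.size != bo->size) checks rely on.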
--
2.21.0.rc0.32.g243a4c7e27