[PATCH v1 1/7] drm/xe/sa: Avoid caching GGTT address within the manager
Tomasz Lis
tomasz.lis at intel.com
Tue May 13 22:49:46 UTC 2025
Non-virtualized resources require fixups after SRIOV VF migration.
Caching GGTT references rather than re-computing them from the
underlying Buffer Object is something we want to avoid, as such a
cache would require an additional fixup step and additional locking
around all the places where the address is accessed.

This change removes the cached address and instead introduces a
function which recomputes and returns the address.
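For callers, the intended pattern after this change is sketched
below; xe_sa_bo_manager_gpu_addr() is the helper added to xe_sa.h by
this patch, while the wrapper function and its name are purely
illustrative:

	/* Hypothetical caller: compute a suballocation's GGTT address
	 * without relying on any value cached on the manager.
	 */
	static u64 example_sa_gpu_addr(struct xe_sa_manager *sa_manager, u32 offset)
	{
		/* Recomputed from the underlying BO on every call */
		return xe_sa_bo_manager_gpu_addr(sa_manager) + offset;
	}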
Signed-off-by: Tomasz Lis <tomasz.lis at intel.com>
---
 drivers/gpu/drm/xe/xe_gt_debugfs.c | 3 ++-
 drivers/gpu/drm/xe/xe_guc_buf.c    | 2 +-
 drivers/gpu/drm/xe/xe_sa.c         | 1 -
 drivers/gpu/drm/xe/xe_sa.h         | 8 +++++++-
 drivers/gpu/drm/xe/xe_sa_types.h   | 1 -
 5 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 119a55bb7580..8aa84050c18b 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -29,6 +29,7 @@
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
+#include "xe_sa.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
@@ -146,7 +147,7 @@ static int sa_info(struct xe_gt *gt, struct drm_printer *p)
xe_pm_runtime_get(gt_to_xe(gt));
drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
- tile->mem.kernel_bb_pool->gpu_addr);
+ xe_sa_bo_manager_gpu_addr(tile->mem.kernel_bb_pool));
xe_pm_runtime_put(gt_to_xe(gt));
return 0;
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 0193c94dd6a0..c459c71a3510 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -168,7 +168,7 @@ u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *p
if (offset < 0 || offset + size > cache->sam->base.size)
return 0;
- return cache->sam->gpu_addr + offset;
+ return xe_sa_bo_manager_gpu_addr(cache->sam) + offset;
}
#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 1d43e183ca21..fedd017d6dd3 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -69,7 +69,6 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
- sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 1170ee5a81a8..614d858b2183 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -7,6 +7,7 @@
#include <linux/sizes.h>
#include <linux/types.h>
+#include "xe_bo.h"
#include "xe_sa_types.h"
struct dma_fence;
@@ -43,9 +44,14 @@ to_xe_sa_manager(struct drm_suballoc_manager *mng)
return container_of(mng, struct xe_sa_manager, base);
}
+static inline u64 xe_sa_bo_manager_gpu_addr(struct xe_sa_manager *sa_manager)
+{
+ return xe_bo_ggtt_addr(sa_manager->bo);
+}
+
static inline u64 xe_sa_bo_gpu_addr(struct drm_suballoc *sa)
{
- return to_xe_sa_manager(sa->manager)->gpu_addr +
+ return xe_sa_bo_manager_gpu_addr(to_xe_sa_manager(sa->manager)) +
drm_suballoc_soffset(sa);
}
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
index 2b070ff1292e..cb7238799dcb 100644
--- a/drivers/gpu/drm/xe/xe_sa_types.h
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -12,7 +12,6 @@ struct xe_bo;
struct xe_sa_manager {
struct drm_suballoc_manager base;
struct xe_bo *bo;
- u64 gpu_addr;
void *cpu_ptr;
bool is_iomem;
};
--
2.25.1