[CI] drm/xe: Replace xe_device_wmb by wmb
Ashutosh Dixit
ashutosh.dixit at intel.com
Mon Aug 26 23:41:00 UTC 2024
CI ONLY for now
In xe_device_wmb(), it is not clear what the purpose of the register write
following wmb() is. Replace xe_device_wmb() with just wmb() to check whether
any failures show up in CI.
Signed-off-by: Ashutosh Dixit <ashutosh.dixit at intel.com>
---
drivers/gpu/drm/xe/xe_device.c | 18 ------------------
drivers/gpu/drm/xe/xe_device.h | 2 --
drivers/gpu/drm/xe/xe_guc_ct.c | 2 +-
drivers/gpu/drm/xe/xe_guc_submit.c | 2 +-
drivers/gpu/drm/xe/xe_migrate.c | 2 +-
drivers/gpu/drm/xe/xe_vm.c | 2 +-
6 files changed, 4 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b6db7e082d887..f0154281f3eea 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -800,24 +800,6 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
-/**
- * xe_device_wmb() - Device specific write memory barrier
- * @xe: the &xe_device
- *
- * While wmb() is sufficient for a barrier if we use system memory, on discrete
- * platforms with device memory we additionally need to issue a register write.
- * Since it doesn't matter which register we write to, use the read-only VF_CAP
- * register that is also marked as accessible by the VFs.
- */
-void xe_device_wmb(struct xe_device *xe)
-{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
- wmb();
- if (IS_DGFX(xe))
- xe_mmio_write32(gt, VF_CAP_REG, 0);
-}
-
/**
* xe_device_td_flush() - Flush transient L3 cache entries
* @xe: The device
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index f052c06a2d2f5..3be5fa94fa113 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -42,8 +42,6 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
-void xe_device_wmb(struct xe_device *xe);
-
static inline struct xe_file *to_xe_file(const struct drm_file *file)
{
return file->driver_priv;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index f24dd52239268..5ac610f227d88 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -596,7 +596,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
/* Write H2G ensuring visable before descriptor update */
xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
- xe_device_wmb(xe);
+ wmb();
/* Update local copies */
h2g->info.tail = (tail + full_len) % h2g->info.size;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index fbbe6a487bbb3..bbf7ec0111642 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -699,7 +699,7 @@ static void wq_item_append(struct xe_exec_queue *q)
q->guc->wqi_tail += wqi_size;
xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
- xe_device_wmb(xe);
+ wmb();
map = xe_lrc_parallel_map(q->lrc[0]);
parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index cbf54be224c96..06cc1f1e65a1f 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1298,7 +1298,7 @@ xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
}
trace_xe_vm_cpu_bind(vm);
- xe_device_wmb(vm->xe);
+ wmb();
return dma_fence_get_stub();
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index bfa4880a1673a..975f174df41c9 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3186,7 +3186,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
for_each_tile(tile, xe, id) {
if (xe_pt_zap_ptes(tile, vma)) {
- xe_device_wmb(xe);
+ wmb();
xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
&fence[fence_id],
true);
--
2.41.0
More information about the Intel-xe
mailing list