[PATCH v5 03/23] drm/xe/device: Update handling of xe_force_wake_get return
Himal Prasad Ghimiray
himal.prasad.ghimiray at intel.com
Tue Sep 24 12:16:21 UTC 2024
With xe_force_wake_get() now returning the refcount-incremented domain
mask, a failure to wake a single domain (i.e. anything other than
XE_FORCEWAKE_ALL) results in a return value of 0. Update the return
handling of xe_force_wake_get() accordingly and pass its return value
to xe_force_wake_put().
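
For reference, a minimal sketch of the converted caller pattern
(illustrative only, not part of this patch; do_something_with_gt() is a
made-up example caller and assumes the usual xe driver headers are in
scope):

	/*
	 * With the new semantics, xe_force_wake_get() returns the mask of
	 * domains whose refcount was incremented (0 on failure for a single
	 * domain), and that same mask is handed back to xe_force_wake_put().
	 */
	static int do_something_with_gt(struct xe_gt *gt)
	{
		unsigned int fw_ref;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref)
			return -ETIMEDOUT;

		/* ... access registers that need the GT domain awake ... */

		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return 0;
	}

The XE_FORCEWAKE_ALL case is excluded above because a partial wake can
still return a non-zero mask for it.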
v3:
- return xe_wakeref_t instead of int in xe_force_wake_get()
- xe_force_wake_put() error doesn't need to be escalated or treated as a
  probing error; it internally WARNs on domain ack failure.
v5:
- return unsigned int from xe_force_wake_get()
Cc: Badal Nilawar <badal.nilawar at intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
drivers/gpu/drm/xe/xe_device.c | 25 ++++++++++++++-----------
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index cb5a9fd820cf..813762bfc7d5 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -598,8 +598,8 @@ int xe_device_probe_early(struct xe_device *xe)
static int probe_has_flat_ccs(struct xe_device *xe)
{
struct xe_gt *gt;
+ unsigned int fw_ref;
u32 reg;
- int err;
/* Always enabled/disabled, no runtime check to do */
if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
@@ -607,9 +607,9 @@ static int probe_has_flat_ccs(struct xe_device *xe)
gt = xe_root_mmio_gt(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- return err;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);
@@ -618,7 +618,8 @@ static int probe_has_flat_ccs(struct xe_device *xe)
drm_dbg(&xe->drm,
"Flat CCS has been disabled in bios, May lead to performance impact");
- return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
int xe_device_probe(struct xe_device *xe)
@@ -869,6 +870,7 @@ void xe_device_wmb(struct xe_device *xe)
void xe_device_td_flush(struct xe_device *xe)
{
struct xe_gt *gt;
+ unsigned int fw_ref;
u8 id;
if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
@@ -883,7 +885,8 @@ void xe_device_td_flush(struct xe_device *xe)
if (xe_gt_is_media_type(gt))
continue;
- if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
@@ -898,22 +901,22 @@ void xe_device_td_flush(struct xe_device *xe)
150, NULL, false))
xe_gt_err_once(gt, "TD flush timeout\n");
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
}
void xe_device_l2_flush(struct xe_device *xe)
{
struct xe_gt *gt;
- int err;
+ unsigned int fw_ref;
gt = xe_root_mmio_gt(xe);
if (!XE_WA(gt, 16023588340))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
spin_lock(&gt->global_invl_lock);
@@ -923,7 +926,7 @@ void xe_device_l2_flush(struct xe_device *xe)
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
--
2.34.1