[PATCH v2 23/23] drm/xe: Change return type to void for xe_force_wake_put
Ghimiray, Himal Prasad
himal.prasad.ghimiray at intel.com
Tue Sep 17 05:24:42 UTC 2024
On 17-09-2024 10:51, Nilawar, Badal wrote:
>
>
> On 17-09-2024 10:22, Nilawar, Badal wrote:
>>
>>
>> On 17-09-2024 10:18, Ghimiray, Himal Prasad wrote:
>>>
>>>
>>> On 17-09-2024 00:12, Nilawar, Badal wrote:
>>>>
>>>>
>>>> On 13-09-2024 00:46, Himal Prasad Ghimiray wrote:
>>>>> In case of xe_force_wake_put() failure, the caller just calls
>>>>> XE_WARN_ON and continues. Move the XE_WARN_ON inside
>>>>> xe_force_wake_put() and return void.
>>>>>
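For reference, at each call site the change boils down to dropping the
XE_WARN_ON wrapper, as the hunks below show:

	/* before */
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));

	/* after: xe_force_wake_put() warns internally and returns void */
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
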
>>>>> Cc: Badal Nilawar <badal.nilawar at intel.com>
>>>>> Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
>>>>> Cc: Lucas De Marchi <lucas.demarchi at intel.com>
>>>>> Cc: Nirmoy Das <nirmoy.das at intel.com>
>>>>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>>>>> ---
>>>>> drivers/gpu/drm/xe/xe_device.c | 3 ++-
>>>>> drivers/gpu/drm/xe/xe_drm_client.c | 4 ++--
>>>>> drivers/gpu/drm/xe/xe_force_wake.c | 14 ++++++++------
>>>>> drivers/gpu/drm/xe/xe_force_wake.h | 4 ++--
>>>>> drivers/gpu/drm/xe/xe_gt.c | 22 +++++++++-------------
>>>>> drivers/gpu/drm/xe/xe_gt_debugfs.c | 6 ++----
>>>>> drivers/gpu/drm/xe/xe_gt_idle.c | 6 +++---
>>>>> drivers/gpu/drm/xe/xe_guc_pc.c | 14 +++++++-------
>>>>> drivers/gpu/drm/xe/xe_oa.c | 4 ++--
>>>>> drivers/gpu/drm/xe/xe_pat.c | 30 +++++++++++++++---------------
>>>>> drivers/gpu/drm/xe/xe_query.c | 2 +-
>>>>> drivers/gpu/drm/xe/xe_reg_sr.c | 12 ++++--------
>>>>> drivers/gpu/drm/xe/xe_vram.c | 3 ++-
>>>>> 13 files changed, 59 insertions(+), 65 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
>>>>> index 5edb1cf51ea5..cce754801e8d 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_device.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_device.c
>>>>> @@ -620,7 +620,8 @@ static int probe_has_flat_ccs(struct xe_device
>>>>> *xe)
>>>>> drm_dbg(&xe->drm,
>>>>> "Flat CCS has been disabled in bios, May lead to
>>>>> performance impact");
>>>>> - return xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + return 0;
>>>>> }
>>>>> int xe_device_probe(struct xe_device *xe)
>>>>> diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
>>>>> index 01f643f6aaba..e5defce08281 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_drm_client.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_drm_client.c
>>>>> @@ -301,12 +301,12 @@ static void show_run_ticks(struct drm_printer
>>>>> *p, struct drm_file *file)
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), fw);
>>>>> if (fw_ref != fw) {
>>>>> hwe = NULL;
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> break;
>>>>> }
>>>>> gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> break;
>>>>> }
>>>>> diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
>>>>> index 73d37e79da9c..01b8bfdd748f 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_force_wake.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_force_wake.c
>>>>> @@ -11,6 +11,7 @@
>>>>> #include "regs/xe_reg_defs.h"
>>>>> #include "xe_gt.h"
>>>>> #include "xe_gt_printk.h"
>>>>> +#include "xe_macros.h"
>>>>> #include "xe_mmio.h"
>>>>> #include "xe_sriov.h"
>>>>> @@ -208,11 +209,10 @@ int __must_check xe_force_wake_get(struct
>>>>> xe_force_wake *fw,
>>>>> * and waits for acknowledgment for domain to sleep within
>>>>> specified timeout.
>>>>> * Ensure this function is always called with return of
>>>>> xe_force_wake_get() as
>>>>> * @domains_mask.
>>>>> - *
>>>>> - * Returns 0 in case of success or non-zero in case of timeout of ack
>
> Instead of changing the doc here, it's better to add the final doc here in the first place.
>
>>>>> + * Warns in case of timeout of ack from domain.
>>>>> */
>>>>> -int xe_force_wake_put(struct xe_force_wake *fw,
>>>>> - int domains_mask)
>>>>> +void xe_force_wake_put(struct xe_force_wake *fw,
>>>>> + int domains_mask)
>>>>> {
>>>>> struct xe_gt *gt = fw->gt;
>>>>> struct xe_force_wake_domain *domain;
>>>>> @@ -225,7 +225,7 @@ int xe_force_wake_put(struct xe_force_wake *fw,
>>>>> * in error path of individual domains.
>>>>> */
>>>>> if (!domains_mask)
>>>>> - return 0;
>>>>> + return;
>>>>> spin_lock_irqsave(&fw->lock, flags);
>>>>> for_each_fw_domain_masked(domain, domains_mask, fw, tmp) {
>>>>> @@ -240,5 +240,7 @@ int xe_force_wake_put(struct xe_force_wake *fw,
>>>>> fw->awake_domains &= ~sleep;
>>>>> spin_unlock_irqrestore(&fw->lock, flags);
>>>>> - return ret;
>>>>> + if (ret)
>>>>> + XE_WARN_ON("Timedout for domain sleep acknowledgment");
>
> Move this WARN_ON to patch 2/23.
Sure.
>
>>>>
>>>> This looks good to me. There is no need to propagate an error if a
>>>> domain failed to sleep; a warn is enough. Additionally, we should
>>>> check whether the fw domain ack bit was left uncleared because of a
>>>> PCIe read completion timeout, i.e. whether the mmio read of the fw
>>>> ack register returned 0xFFFFFFFF. That means the device is in a bad
>>>> state, and in that case we should consider putting the device into
>>>> the wedged state.
>>>>
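For illustration, the check discussed above could look roughly like the
sketch below. xe_device_declare_wedged() and the domain's reg_ack
register already exist in the driver; the helper name and where it would
be called from are only assumptions for a possible separate patch.

	/*
	 * Illustrative sketch only, not part of this series: if the
	 * forcewake ack register reads back as all ones, the MMIO read
	 * most likely failed (e.g. PCIe completion timeout) and the
	 * device is in a bad state, so declare it wedged.
	 */
	static void check_fw_ack_alive(struct xe_force_wake *fw,
				       struct xe_force_wake_domain *domain)
	{
		struct xe_gt *gt = fw->gt;
		u32 ack = xe_mmio_read32(&gt->mmio, domain->reg_ack);

		if (ack == ~0u) {
			xe_gt_err(gt, "forcewake ack read returned 0x%08x, device in bad state\n",
				  ack);
			xe_device_declare_wedged(gt_to_xe(gt));
		}
	}
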
>>>
>>>
>>> This seems to be a good check to have. On cross-verifying, I see a
>>> similar implementation in i915. If you are OK with it, how about
>>> taking this up as a separate patch?
>>
>> Yes, i915 has a similar implementation. Sure, let's handle it in a
>> separate patch.
>>
>> Regards,
>> Badal
>>>
>>> BR
>>> Himal
>>>
>>>> Regards,
>>>> Badal
>>>>> +
>>>>> }
>>>>> diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
>>>>> index e17fe316dc3c..c2c729371b2a 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_force_wake.h
>>>>> +++ b/drivers/gpu/drm/xe/xe_force_wake.h
>>>>> @@ -17,8 +17,8 @@ void xe_force_wake_init_engines(struct xe_gt *gt,
>>>>> struct xe_force_wake *fw);
>>>>> int __must_check xe_force_wake_get(struct xe_force_wake *fw,
>>>>> enum xe_force_wake_domains domains);
>>>>> -int xe_force_wake_put(struct xe_force_wake *fw,
>>>>> - int domains_mask);
>>>>> +void xe_force_wake_put(struct xe_force_wake *fw,
>>>>> + int domains_mask);
>>>>> static inline int
>>>>> xe_force_wake_ref(struct xe_force_wake *fw,
>>>>> diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
>>>>> index b0126daee359..5f5760b60eac 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_gt.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_gt.c
>>>>> @@ -446,8 +446,7 @@ static int gt_fw_domain_init(struct xe_gt *gt)
>>>>> */
>>>>> gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> - XE_WARN_ON(err);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>
> I understand the reasoning behind making this change here, since it
> keeps the series compiling. But with XE_WARN_ON relocated into
> xe_force_wake_put(), it is more appropriate to introduce this call-site
> cleanup in the respective patches. This particular patch should then
> only contain the return type change and the documentation update.
Thanks for the comment. Will limit this patch to the doc addition and
changing the return type to void.
>
> Regards,
> Badal
>>>>> return 0;
>>>>> @@ -533,9 +532,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
>>>>> if (IS_SRIOV_PF(gt_to_xe(gt)))
>>>>> xe_gt_sriov_pf_init_hw(gt);
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> - XE_WARN_ON(err);
>>>>> -
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return 0;
>>>>> err_force_wake:
>>>>> @@ -787,8 +784,7 @@ static int gt_reset(struct xe_gt *gt)
>>>>> if (err)
>>>>> goto err_out;
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> - XE_WARN_ON(err);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_pm_runtime_put(gt_to_xe(gt));
>>>>> xe_gt_info(gt, "reset done\n");
>>>>> @@ -796,7 +792,7 @@ static int gt_reset(struct xe_gt *gt)
>>>>> return 0;
>>>>> err_out:
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> XE_WARN_ON(xe_uc_start(&gt->uc));
>>>>> err_fail:
>>>>> xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
>>>>> @@ -835,7 +831,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
>>>>> xe_uc_stop_prepare(&gt->uc);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> }
>>>>> int xe_gt_suspend(struct xe_gt *gt)
>>>>> @@ -857,7 +853,7 @@ int xe_gt_suspend(struct xe_gt *gt)
>>>>> xe_gt_disable_host_l2_vram(gt);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_gt_dbg(gt, "suspended\n");
>>>>> return 0;
>>>>> @@ -865,7 +861,7 @@ int xe_gt_suspend(struct xe_gt *gt)
>>>>> err_msg:
>>>>> err = -ETIMEDOUT;
>>>>> err_force_wake:
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
>>>>> return err;
>>>>> @@ -914,7 +910,7 @@ int xe_gt_resume(struct xe_gt *gt)
>>>>> xe_gt_idle_enable_pg(gt);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_gt_dbg(gt, "resumed\n");
>>>>> return 0;
>>>>> @@ -922,7 +918,7 @@ int xe_gt_resume(struct xe_gt *gt)
>>>>> err_msg:
>>>>> err = -ETIMEDOUT;
>>>>> err_force_wake:
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
>>>>> return err;
>>>>> diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
>>>>> index 86146de1d31c..2ecf3c87c6b0 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
>>>>> @@ -90,7 +90,7 @@ static int hw_engines(struct xe_gt *gt, struct
>>>>> drm_printer *p)
>>>>> struct xe_device *xe = gt_to_xe(gt);
>>>>> struct xe_hw_engine *hwe;
>>>>> enum xe_hw_engine_id id;
>>>>> - int fw_ref, err;
>>>>> + int fw_ref;
>>>>> xe_pm_runtime_get(xe);
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
>>>>> @@ -103,10 +103,8 @@ static int hw_engines(struct xe_gt *gt, struct
>>>>> drm_printer *p)
>>>>> for_each_hw_engine(hwe, gt, id)
>>>>> xe_hw_engine_print(hwe, p);
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_pm_runtime_put(xe);
>>>>> - if (err)
>>>>> - return err;
>>>>> return 0;
>>>>> }
>>>>> diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
>>>>> index 9af81b07ab7a..1a7ee5681da6 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_gt_idle.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_gt_idle.c
>>>>> @@ -144,7 +144,7 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
>>>>> }
>>>>> xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> }
>>>>> void xe_gt_idle_disable_pg(struct xe_gt *gt)
>>>>> @@ -161,7 +161,7 @@ void xe_gt_idle_disable_pg(struct xe_gt *gt)
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
>>>>> XE_WARN_ON(!fw_ref);
>>>>> xe_mmio_write32(&gt->mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> }
>>>>> /**
>>>>> @@ -223,7 +223,7 @@ int xe_gt_idle_pg_print(struct xe_gt *gt,
>>>>> struct drm_printer *p)
>>>>> pg_enabled = xe_mmio_read32(&gt->mmio, POWERGATE_ENABLE);
>>>>> pg_status = xe_mmio_read32(&gt->mmio, POWERGATE_DOMAIN_STATUS);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> }
>>>>> if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) {
>>>>> diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
>>>>> index c7a00565216d..27ad412936f7 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_guc_pc.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_guc_pc.c
>>>>> @@ -423,7 +423,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc
>>>>> *pc, u32 *freq)
>>>>> */
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
>>>>> if (fw_ref != XE_FORCEWAKE_ALL) {
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return -ETIMEDOUT;
>>>>> }
>>>>> @@ -432,7 +432,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc
>>>>> *pc, u32 *freq)
>>>>> *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
>>>>> *freq = decode_freq(*freq);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return 0;
>>>>> }
>>>>> @@ -508,7 +508,7 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc
>>>>> *pc, u32 *freq)
>>>>> *freq = pc_get_min_freq(pc);
>>>>> fw:
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> out:
>>>>> mutex_unlock(&pc->freq_lock);
>>>>> return ret;
>>>>> @@ -870,13 +870,13 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc
>>>>> *pc)
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
>>>>> if (fw_ref != XE_FORCEWAKE_ALL) {
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return -ETIMEDOUT;
>>>>> }
>>>>> xe_gt_idle_disable_c6(gt);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return 0;
>>>>> }
>>>>> @@ -968,7 +968,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
>>>>> if (fw_ref != XE_FORCEWAKE_ALL) {
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return -ETIMEDOUT;
>>>>> }
>>>>> @@ -1013,7 +1013,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
>>>>> ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
>>>>> out:
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return ret;
>>>>> }
>>>>> diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
>>>>> index 80e5c4ef86ef..8d7d9e7ade51 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_oa.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_oa.c
>>>>> @@ -840,7 +840,7 @@ static void xe_oa_stream_destroy(struct
>>>>> xe_oa_stream *stream)
>>>>> xe_oa_free_oa_buffer(stream);
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
>>>>> + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
>>>>> xe_pm_runtime_put(stream->oa->xe);
>>>>> /* Wa_1509372804:pvc: Unset the override of GUCRC mode to
>>>>> enable rc6 */
>>>>> @@ -1463,7 +1463,7 @@ static int xe_oa_stream_init(struct
>>>>> xe_oa_stream *stream,
>>>>> err_free_oa_buf:
>>>>> xe_oa_free_oa_buffer(stream);
>>>>> err_fw_put:
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_pm_runtime_put(stream->oa->xe);
>>>>> if (stream->override_gucrc)
>>>>> xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
>>>>> diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
>>>>> index 96d5ec1fbbd9..4372dd727e9f 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_pat.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_pat.c
>>>>> @@ -182,7 +182,7 @@ static void program_pat_mcr(struct xe_gt *gt,
>>>>> const struct xe_pat_table_entry ta
>>>>> static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
>>>>> {
>>>>> struct xe_device *xe = gt_to_xe(gt);
>>>>> - int i, err, fw_ref;
>>>>> + int i, fw_ref;
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
>>>>> if (!fw_ref)
>>>>> @@ -198,9 +198,9 @@ static void xelp_dump(struct xe_gt *gt, struct
>>>>> drm_printer *p)
>>>>> XELP_MEM_TYPE_STR_MAP[mem_type], pat);
>>>>> }
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> err_fw:
>>>>> - xe_assert(xe, (fw_ref && !err));
>>>>> + xe_assert(xe, !fw_ref);
>>>>> }
>>>>> static const struct xe_pat_ops xelp_pat_ops = {
>>>>> @@ -211,7 +211,7 @@ static const struct xe_pat_ops xelp_pat_ops = {
>>>>> static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
>>>>> {
>>>>> struct xe_device *xe = gt_to_xe(gt);
>>>>> - int i, err, fw_ref;
>>>>> + int i, fw_ref;
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
>>>>> if (!fw_ref)
>>>>> @@ -229,9 +229,9 @@ static void xehp_dump(struct xe_gt *gt, struct
>>>>> drm_printer *p)
>>>>> XELP_MEM_TYPE_STR_MAP[mem_type], pat);
>>>>> }
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> err_fw:
>>>>> - xe_assert(xe, (fw_ref && !err));
>>>>> + xe_assert(xe, !fw_ref);
>>>>> }
>>>>> static const struct xe_pat_ops xehp_pat_ops = {
>>>>> @@ -242,7 +242,7 @@ static const struct xe_pat_ops xehp_pat_ops = {
>>>>> static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
>>>>> {
>>>>> struct xe_device *xe = gt_to_xe(gt);
>>>>> - int i, err, fw_ref;
>>>>> + int i, fw_ref;
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
>>>>> if (!fw_ref)
>>>>> @@ -258,9 +258,9 @@ static void xehpc_dump(struct xe_gt *gt, struct
>>>>> drm_printer *p)
>>>>> REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
>>>>> }
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> err_fw:
>>>>> - xe_assert(xe, (fw_ref && !err));
>>>>> + xe_assert(xe, !fw_ref);
>>>>> }
>>>>> static const struct xe_pat_ops xehpc_pat_ops = {
>>>>> @@ -271,7 +271,7 @@ static const struct xe_pat_ops xehpc_pat_ops = {
>>>>> static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
>>>>> {
>>>>> struct xe_device *xe = gt_to_xe(gt);
>>>>> - int i, err, fw_ref;
>>>>> + int i, fw_ref;
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
>>>>> if (!fw_ref)
>>>>> @@ -292,9 +292,9 @@ static void xelpg_dump(struct xe_gt *gt, struct
>>>>> drm_printer *p)
>>>>> REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
>>>>> }
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> err_fw:
>>>>> - xe_assert(xe, (fw_ref && !err));
>>>>> + xe_assert(xe, !fw_ref);
>>>>> }
>>>>> /*
>>>>> @@ -330,7 +330,7 @@ static void xe2lpm_program_pat(struct xe_gt
>>>>> *gt, const struct xe_pat_table_entry
>>>>> static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
>>>>> {
>>>>> struct xe_device *xe = gt_to_xe(gt);
>>>>> - int i, err, fw_ref;
>>>>> + int i, fw_ref;
>>>>> u32 pat;
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
>>>>> @@ -374,9 +374,9 @@ static void xe2_dump(struct xe_gt *gt, struct
>>>>> drm_printer *p)
>>>>> REG_FIELD_GET(XE2_COH_MODE, pat),
>>>>> pat);
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> err_fw:
>>>>> - xe_assert(xe, (fw_ref && !err));
>>>>> + xe_assert(xe, !fw_ref);
>>>>> }
>>>>> static const struct xe_pat_ops xe2_pat_ops = {
>>>>> diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
>>>>> index 7c866b736f00..df7bdf3822cc 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_query.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_query.c
>>>>> @@ -153,7 +153,7 @@ query_engine_cycles(struct xe_device *xe,
>>>>> fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
>>>>> if (fw_ref != XE_FORCEWAKE_ALL) {
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return -EIO;
>>>>> }
>>>>> diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
>>>>> index 6ab6a48b1d29..874523f22f56 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_reg_sr.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_reg_sr.c
>>>>> @@ -202,14 +202,12 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr
>>>>> *sr, struct xe_gt *gt)
>>>>> xa_for_each(&sr->xa, reg, entry)
>>>>> apply_one_mmio(gt, entry);
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> - XE_WARN_ON(err);
>>>>> -
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return;
>>>>> err_force_wake:
>>>>> err = -ETIMEDOUT;
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> xe_gt_err(gt, "Failed to apply, err=%d\n", err);
>>>>> }
>>>>> @@ -256,14 +254,12 @@ void xe_reg_sr_apply_whitelist(struct
>>>>> xe_hw_engine *hwe)
>>>>> xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
>>>>> }
>>>>> - err = xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> - XE_WARN_ON(err);
>>>>> -
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> return;
>>>>> err_force_wake:
>>>>> err = -ETIMEDOUT;
>>>>> - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw_ref));
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
>>>>> }
>>>>> diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
>>>>> index 3b5256d76d9a..1384c97a75c2 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_vram.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_vram.c
>>>>> @@ -263,7 +263,8 @@ static int tile_vram_size(struct xe_tile *tile,
>>>>> u64 *vram_size,
>>>>> /* remove the tile offset so we have just the available size */
>>>>> *vram_size = offset - *tile_offset;
>>>>> - return xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + xe_force_wake_put(gt_to_fw(gt), fw_ref);
>>>>> + return 0;
>>>>> }
>>>>> static void vram_fini(void *arg)
>>>>
>>
>