[PATCH v8 2/4] drm/xe: Don't update wedged mode in case of an error
Laguna, Lukasz
lukasz.laguna at intel.com
Wed Aug 6 08:37:15 UTC 2025
On 8/6/2025 00:15, Rodrigo Vivi wrote:
> On Thu, Jul 31, 2025 at 02:37:54PM +0200, Lukasz Laguna wrote:
>> Change the driver's internal wedged.mode state only on success, and
>> update the GuC's reset policy only when necessary.
>>
>> Fixes: 6b8ef44cc0a9 ("drm/xe: Introduce the wedged_mode debugfs")
>> Signed-off-by: Lukasz Laguna <lukasz.laguna at intel.com>
>> ---
>> v7: Don't introduce XE_WEDGED_MODE_MISCONFIGURED enum field (Michal)
>> Add needs_policy_update helper (Michal)
>> Rename wedged_mode_set_reset_policy to set_reset_policy (Lukasz)
>> ---
>> drivers/gpu/drm/xe/xe_debugfs.c | 72 ++++++++++++++++++++++------
>> drivers/gpu/drm/xe/xe_device_types.h | 2 +
>> drivers/gpu/drm/xe/xe_guc_ads.c | 12 ++---
>> drivers/gpu/drm/xe/xe_guc_ads.h | 4 +-
>> 4 files changed, 68 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
>> index 16b2e306559a..8aff93401eb9 100644
>> --- a/drivers/gpu/drm/xe/xe_debugfs.c
>> +++ b/drivers/gpu/drm/xe/xe_debugfs.c
>> @@ -251,14 +251,64 @@ static ssize_t wedged_mode_show(struct file *f, char __user *ubuf,
>> return simple_read_from_buffer(ubuf, size, pos, buf, len);
>> }
>>
>> +static int __set_reset_policy(struct xe_gt *gt, enum xe_wedged_mode mode)
>> +{
>> + int ret;
>> +
>> + ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads,
>> + !(mode == XE_WEDGED_MODE_UPON_ANY_HANG));
> mode != XE_WEDGED_MODE_UPON_ANY_HANG please
OK
>
>> + if (ret)
>> + xe_gt_err(gt, "Failed to update GuC ADS scheduler policy (%pe)\n", ERR_PTR(ret));
>> +
>> + return ret;
>> +}
>> +
>> +static int set_reset_policy(struct xe_device *xe, enum xe_wedged_mode mode)
>> +{
>> + struct xe_gt *gt;
>> + int ret;
>> + u8 id;
>> +
>> + xe_pm_runtime_get(xe);
>> + for_each_gt(gt, xe, id) {
>> + ret = __set_reset_policy(gt, mode);
>> + if (ret) {
>> + if (id > 0) {
>> + xe->wedged.inconsistent_reset = true;
>> + drm_err(&xe->drm, "Inconsistent reset policy state between GTs\n");
>> + }
>> +
>> + xe_pm_runtime_put(xe);
>> + return ret;
> Why return on the first GT? Perhaps we should continue and just leave one
> behind?
But why continue if we already have an error on the first GT? If it
succeeds on the next GTs, we'll end up with an inconsistent state.
> Perhaps we should have a 3-attempt retry logic before giving up?
OK, I'll add retry logic.
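Something along these lines (just a rough sketch; the helper name and the
retry count of 3 are placeholders):

static int set_reset_policy_with_retry(struct xe_gt *gt,
				       enum xe_wedged_mode mode)
{
	unsigned int attempt;
	int ret;

	/* Retry the GuC policy update a few times before giving up. */
	for (attempt = 0; attempt < 3; attempt++) {
		ret = __set_reset_policy(gt, mode);
		if (!ret)
			break;
	}

	return ret;
}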
> perhaps both combined?
>
>> + }
>> + }
>> + xe_pm_runtime_put(xe);
>> +
>> + xe->wedged.inconsistent_reset = false;
> then move this to the beginning of the function
> also ret = 0; up there
OK
>
>> +
>> + return 0;
> and simply return ret; here
OK
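With that, set_reset_policy would end up roughly like below (sketch only,
reusing the retry helper sketched above):

static int set_reset_policy(struct xe_device *xe, enum xe_wedged_mode mode)
{
	struct xe_gt *gt;
	int ret = 0;
	u8 id;

	/* Cleared up front, set again only if we fail past the first GT. */
	xe->wedged.inconsistent_reset = false;

	xe_pm_runtime_get(xe);
	for_each_gt(gt, xe, id) {
		ret = set_reset_policy_with_retry(gt, mode);
		if (ret) {
			if (id > 0) {
				xe->wedged.inconsistent_reset = true;
				drm_err(&xe->drm, "Inconsistent reset policy state between GTs\n");
			}
			break;
		}
	}
	xe_pm_runtime_put(xe);

	return ret;
}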
>
>> +}
>> +
>> +static bool needs_policy_update(struct xe_device *xe, enum xe_wedged_mode mode)
>> +{
>> + if (xe->wedged.inconsistent_reset)
>> + return true;
>> +
>> + if (xe->wedged.mode == mode)
>> + return false;
>> +
>> + return !((xe->wedged.mode == XE_WEDGED_MODE_NEVER &&
>> + mode == XE_WEDGED_MODE_UPON_CRITICAL_ERROR) ||
>> + (xe->wedged.mode == XE_WEDGED_MODE_UPON_CRITICAL_ERROR &&
>> + mode == XE_WEDGED_MODE_NEVER));
> This is worse than the one above... please expand this inside
> the function instead of this overloaded return full of not-or-and logic.
OK, I'll expand this to inside the function.
I just realized that this logic can additionally be simplified. A policy
update is needed only when we change the mode to or from
XE_WEDGED_MODE_UPON_ANY_HANG, so it's enough to check that.
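So needs_policy_update could boil down to something like this (sketch):

static bool needs_policy_update(struct xe_device *xe, enum xe_wedged_mode mode)
{
	/* A previous partial failure always forces a policy update. */
	if (xe->wedged.inconsistent_reset)
		return true;

	if (xe->wedged.mode == mode)
		return false;

	/* Only transitions to or from UPON_ANY_HANG change the GuC policy. */
	if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG)
		return true;

	if (mode == XE_WEDGED_MODE_UPON_ANY_HANG)
		return true;

	return false;
}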
Thanks for review.
>
> Some wording in the commit message about which cases these are would be
> very helpful
>
>> +}
>> +
>> static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
>> size_t size, loff_t *pos)
>> {
>> struct xe_device *xe = file_inode(f)->i_private;
>> - struct xe_gt *gt;
>> u32 wedged_mode;
>> ssize_t ret;
>> - u8 id;
>>
>> ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode);
>> if (ret)
>> @@ -268,22 +318,14 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
>> if (ret)
>> return ret;
>>
>> - if (xe->wedged.mode == wedged_mode)
>> - return size;
>> + if (needs_policy_update(xe, wedged_mode)) {
>> + ret = set_reset_policy(xe, wedged_mode);
>> + if (ret)
>> + return ret;
>> + }
>>
>> xe->wedged.mode = wedged_mode;
>>
>> - xe_pm_runtime_get(xe);
>> - for_each_gt(gt, xe, id) {
>> - ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
>> - if (ret) {
>> - xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
>> - xe_pm_runtime_put(xe);
>> - return -EIO;
>> - }
>> - }
>> - xe_pm_runtime_put(xe);
>> -
>> return size;
>> }
>>
>> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
>> index 1e496845c91f..829252db3a47 100644
>> --- a/drivers/gpu/drm/xe/xe_device_types.h
>> +++ b/drivers/gpu/drm/xe/xe_device_types.h
>> @@ -546,6 +546,8 @@ struct xe_device {
>> XE_WEDGED_MODE_UPON_ANY_HANG = 2,
>> XE_WEDGED_MODE_DEFAULT = XE_WEDGED_MODE_UPON_CRITICAL_ERROR,
>> } mode;
>> + /** @wedged.inconsistent_reset: Inconsistent reset policy state between GTs */
>> + bool inconsistent_reset;
>> } wedged;
>>
>> /** @bo_device: Struct to control async free of BOs */
>> diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
>> index c4ea4e6d82ce..7f58d77e0ab9 100644
>> --- a/drivers/gpu/drm/xe/xe_guc_ads.c
>> +++ b/drivers/gpu/drm/xe/xe_guc_ads.c
>> @@ -1033,16 +1033,16 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
>> /**
>> * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
>> * @ads: Additional data structures object
>> + * @enable: true to enable engine resets, false to disable them
>> *
>> - * This function update the GuC's engine reset policy based on wedged.mode.
>> + * This function updates the GuC's engine reset policy.
>> *
>> * Return: 0 on success, and negative error code otherwise.
>> */
>> -int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
>> +int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads, bool enable)
>> {
>> struct guc_policies *policies;
>> struct xe_guc *guc = ads_to_guc(ads);
>> - struct xe_device *xe = ads_to_xe(ads);
>> CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));
>>
>> if (!xe_guc_buf_is_valid(buf))
>> @@ -1054,10 +1054,10 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
>> policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
>> policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
>> policies->is_valid = 1;
>> - if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG)
>> - policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
>> - else
>> + if (enable)
>> policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
>> + else
>> + policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
>>
>> return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
>> }
>> diff --git a/drivers/gpu/drm/xe/xe_guc_ads.h b/drivers/gpu/drm/xe/xe_guc_ads.h
>> index 2e6674c760ff..9879aadd22d6 100644
>> --- a/drivers/gpu/drm/xe/xe_guc_ads.h
>> +++ b/drivers/gpu/drm/xe/xe_guc_ads.h
>> @@ -6,6 +6,8 @@
>> #ifndef _XE_GUC_ADS_H_
>> #define _XE_GUC_ADS_H_
>>
>> +#include <linux/types.h>
>> +
>> struct xe_guc_ads;
>>
>> int xe_guc_ads_init(struct xe_guc_ads *ads);
>> @@ -13,6 +15,6 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads);
>> void xe_guc_ads_populate(struct xe_guc_ads *ads);
>> void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads);
>> void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads);
>> -int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads);
>> +int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads, bool enable);
>>
>> #endif
>> --
>> 2.40.0
>>