[Intel-xe] [PATCH 2/2] drm/xe: nuke GuC on unload

Matthew Auld matthew.auld at intel.com
Thu Aug 24 15:19:12 UTC 2023


On 24/08/2023 15:26, Matthew Brost wrote:
> On Wed, Aug 23, 2023 at 06:55:53PM +0100, Matthew Auld wrote:
>> On PVC unloading followed by reloading the module often results in a
>> completely dead machine (seems to be plaguing CI). Resetting the GuC
>> like we do at load seems to cure it at least when locally testing this.
>>
>> References: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/542
>> References: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/597
>> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
>> Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
> 
> Seems reasonable to reset the GuC on driver unload, one question below.
> 
>> ---
>>   drivers/gpu/drm/xe/xe_guc.c | 16 ++++++++++++++++
>>   drivers/gpu/drm/xe/xe_uc.c  |  5 +++++
>>   drivers/gpu/drm/xe/xe_uc.h  |  1 +
>>   3 files changed, 22 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
>> index e102637c0695..bbe06686a706 100644
>> --- a/drivers/gpu/drm/xe/xe_guc.c
>> +++ b/drivers/gpu/drm/xe/xe_guc.c
>> @@ -5,6 +5,8 @@
>>   
>>   #include "xe_guc.h"
>>   
>> +#include <drm/drm_managed.h>
>> +
>>   #include "generated/xe_wa_oob.h"
>>   #include "regs/xe_gt_regs.h"
>>   #include "regs/xe_guc_regs.h"
>> @@ -20,6 +22,7 @@
>>   #include "xe_guc_submit.h"
>>   #include "xe_mmio.h"
>>   #include "xe_platform_types.h"
>> +#include "xe_uc.h"
>>   #include "xe_uc_fw.h"
>>   #include "xe_wa.h"
>>   #include "xe_wopcm.h"
>> @@ -217,6 +220,15 @@ static void guc_write_params(struct xe_guc *guc)
>>   		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
>>   }
>>   
>> +static void guc_fini(struct drm_device *drm, void *arg)
>> +{
>> +	struct xe_guc *guc = arg;
>> +
>> +	xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
>> +	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
>> +	xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
>> +}
>> +
>>   int xe_guc_init(struct xe_guc *guc)
>>   {
>>   	struct xe_device *xe = guc_to_xe(guc);
>> @@ -240,6 +252,10 @@ int xe_guc_init(struct xe_guc *guc)
>>   	if (ret)
>>   		goto out;
>>   
>> +	ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
>> +	if (ret)
>> +		goto out;
>> +
> 
> Any reason this is after xe_guc_ct_init but before xe_guc_pc_init? Seems
> like odd placement.

Yeah, I think the issue was that pc_fini() needs the GuC to be alive, so 
order needs to be that guc_fini() is called last. I can try to move the 
pc_fini() into guc_fini() and see if that is cleaner.

> 
> Matt
> 
>>   	ret = xe_guc_pc_init(&guc->pc);
>>   	if (ret)
>>   		goto out;
>> diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
>> index addd6f2681b9..9c8ce504f4da 100644
>> --- a/drivers/gpu/drm/xe/xe_uc.c
>> +++ b/drivers/gpu/drm/xe/xe_uc.c
>> @@ -167,6 +167,11 @@ int xe_uc_init_hw(struct xe_uc *uc)
>>   	return 0;
>>   }
>>   
>> +int xe_uc_fini_hw(struct xe_uc *uc)
>> +{
>> +	return xe_uc_sanitize_reset(uc);
>> +}
>> +
>>   int xe_uc_reset_prepare(struct xe_uc *uc)
>>   {
>>   	/* GuC submission not enabled, nothing to do */
>> diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
>> index 42219b361df5..4109ae7028af 100644
>> --- a/drivers/gpu/drm/xe/xe_uc.h
>> +++ b/drivers/gpu/drm/xe/xe_uc.h
>> @@ -12,6 +12,7 @@ int xe_uc_init(struct xe_uc *uc);
>>   int xe_uc_init_hwconfig(struct xe_uc *uc);
>>   int xe_uc_init_post_hwconfig(struct xe_uc *uc);
>>   int xe_uc_init_hw(struct xe_uc *uc);
>> +int xe_uc_fini_hw(struct xe_uc *uc);
>>   void xe_uc_gucrc_disable(struct xe_uc *uc);
>>   int xe_uc_reset_prepare(struct xe_uc *uc);
>>   void xe_uc_stop_prepare(struct xe_uc *uc);
>> -- 
>> 2.41.0
>>


More information about the Intel-xe mailing list