[Intel-gfx] [PATCH 4/6] drm/i915/debugfs: move uC printers and update debugfs file names

Daniele Ceraolo Spurio daniele.ceraolospurio at intel.com
Wed Mar 11 23:38:55 UTC 2020


<snip>

>> +/**
>> + * intel_huc_load_status - dump information about HuC load status
>> + * @huc: the HuC
>> + * @p: the &drm_printer
>> + *
>> + * Pretty printer for HuC load status.
>> + */
>> +void intel_huc_load_status(const struct intel_huc *huc, struct drm_printer *p)
>> +{
>> +    struct intel_gt *gt = huc_to_gt(huc);
>> +    intel_wakeref_t wakeref;
>> +
>> +    if (!intel_huc_is_supported(huc)) {
>> +        drm_printf(p, "HuC not supported\n");
>> +        return;
>> +    }
>> +
>> +    if (!intel_huc_is_wanted(huc)) {
>> +        drm_printf(p, "HuC disabled\n");
>> +        return;
>> +    }
>> +
>> +    intel_uc_fw_dump(&huc->fw, p);
>> +
>> +    with_intel_runtime_pm(gt->uncore->rpm, wakeref)
>> +        drm_printf(p, "\nHuC status 0x%08x:\n",
>> +               intel_uncore_read(gt->uncore, huc->status.reg));
> 
> HUC_STATUS register is still available on Gen11+. But if the purpose 
> here is to print the status of huc load/auth, then the change makes sense.
> 

The load/auth status is already dumped as part of intel_uc_fw_dump(). I 
thought all the state had transferred over to the new register, but since 
I was wrong I'll keep using HUC_STATUS.

Thanks for catching this,
Daniele
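
As an aside, one nice side effect of moving the pretty printer into 
intel_huc.c is that it can be reused outside debugfs too, e.g. routed to 
dmesg. A minimal sketch (hypothetical helper name, assuming the interface 
lands as posted above):

	/* hypothetical example, not part of this patch: reuse the HuC
	 * pretty printer to dump load status to the kernel log */
	static void dump_huc_state(struct intel_gt *gt)
	{
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		intel_huc_load_status(&gt->uc.huc, &p);
	}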

> Thanks,
> Tony
> 
>> +}
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
>> index 19651b46d6a4..f1299c0138e5 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
>> @@ -57,4 +57,6 @@ static inline bool intel_huc_is_authenticated(const struct intel_huc *huc)
>>       return intel_uc_fw_is_running(&huc->fw);
>>   }
>> +void intel_huc_load_status(const struct intel_huc *huc, struct drm_printer *p);
>> +
>>   #endif
>> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
>> index 37cb8b4bf4dc..1bec4cdeb92f 100644
>> --- a/drivers/gpu/drm/i915/i915_debugfs.c
>> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
>> @@ -1465,105 +1465,32 @@ static int i915_llc(struct seq_file *m, void *data)
>>       return 0;
>>   }
>> -static int i915_huc_load_status_info(struct seq_file *m, void *data)
>> +static int i915_huc_info(struct seq_file *m, void *data)
>>   {
>>       struct drm_i915_private *dev_priv = node_to_i915(m->private);
>> -    intel_wakeref_t wakeref;
>> -    struct drm_printer p;
>> -
>> -    if (!HAS_GT_UC(dev_priv))
>> -        return -ENODEV;
>> -
>> -    p = drm_seq_file_printer(m);
>> -    intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
>> -
>> -    with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
>> -        seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
>> -
>> -    return 0;
>> -}
>> -
>> -static int i915_guc_load_status_info(struct seq_file *m, void *data)
>> -{
>> -    struct drm_i915_private *dev_priv = node_to_i915(m->private);
>> -    intel_wakeref_t wakeref;
>> -    struct drm_printer p;
>> +    struct intel_huc *huc = &dev_priv->gt.uc.huc;
>> +    struct drm_printer p = drm_seq_file_printer(m);
>> -    if (!HAS_GT_UC(dev_priv))
>> +    if (!intel_huc_is_supported(huc))
>>           return -ENODEV;
>> -    p = drm_seq_file_printer(m);
>> -    intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
>> -
>> -    with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
>> -        u32 tmp = I915_READ(GUC_STATUS);
>> -        u32 i;
>> -
>> -        seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
>> -        seq_printf(m, "\tBootrom status = 0x%x\n",
>> -               (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
>> -        seq_printf(m, "\tuKernel status = 0x%x\n",
>> -               (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
>> -        seq_printf(m, "\tMIA Core status = 0x%x\n",
>> -               (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
>> -        seq_puts(m, "\nScratch registers:\n");
>> -        for (i = 0; i < 16; i++) {
>> -            seq_printf(m, "\t%2d: \t0x%x\n",
>> -                   i, I915_READ(SOFT_SCRATCH(i)));
>> -        }
>> -    }
>> +    intel_huc_load_status(huc, &p);
>>       return 0;
>>   }
>> -static const char *
>> -stringify_guc_log_type(enum guc_log_buffer_type type)
>> -{
>> -    switch (type) {
>> -    case GUC_ISR_LOG_BUFFER:
>> -        return "ISR";
>> -    case GUC_DPC_LOG_BUFFER:
>> -        return "DPC";
>> -    case GUC_CRASH_DUMP_LOG_BUFFER:
>> -        return "CRASH";
>> -    default:
>> -        MISSING_CASE(type);
>> -    }
>> -
>> -    return "";
>> -}
>> -
>> -static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
>> -{
>> -    enum guc_log_buffer_type type;
>> -
>> -    if (!intel_guc_log_relay_created(log)) {
>> -        seq_puts(m, "GuC log relay not created\n");
>> -        return;
>> -    }
>> -
>> -    seq_puts(m, "GuC logging stats:\n");
>> -
>> -    seq_printf(m, "\tRelay full count: %u\n",
>> -           log->relay.full_count);
>> -
>> -    for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
>> -        seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
>> -               stringify_guc_log_type(type),
>> -               log->stats[type].flush,
>> -               log->stats[type].sampled_overflow);
>> -    }
>> -}
>> -
>>   static int i915_guc_info(struct seq_file *m, void *data)
>>   {
>>       struct drm_i915_private *dev_priv = node_to_i915(m->private);
>> -    struct intel_uc *uc = &dev_priv->gt.uc;
>> +    struct intel_guc *guc = &dev_priv->gt.uc.guc;
>> +    struct drm_printer p = drm_seq_file_printer(m);
>> -    if (!intel_uc_uses_guc(uc))
>> +    if (!intel_guc_is_supported(guc))
>>           return -ENODEV;
>> -    i915_guc_log_info(m, &uc->guc.log);
>> +    intel_guc_load_status(guc, &p);
>> +    drm_puts(&p, "\n");
>> +    intel_guc_log_info(&guc->log, &p);
>>       /* Add more as required ... */
>> @@ -1574,39 +1501,14 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
>>   {
>>       struct drm_info_node *node = m->private;
>>       struct drm_i915_private *dev_priv = node_to_i915(node);
>> +    struct intel_guc *guc = &dev_priv->gt.uc.guc;
>>       bool dump_load_err = !!node->info_ent->data;
>> -    struct drm_i915_gem_object *obj = NULL;
>> -    u32 *log;
>> -    int i = 0;
>> +    struct drm_printer p = drm_seq_file_printer(m);
>> -    if (!HAS_GT_UC(dev_priv))
>> +    if (!intel_guc_is_supported(guc))
>>           return -ENODEV;
>> -    if (dump_load_err)
>> -        obj = dev_priv->gt.uc.load_err_log;
>> -    else if (dev_priv->gt.uc.guc.log.vma)
>> -        obj = dev_priv->gt.uc.guc.log.vma->obj;
>> -
>> -    if (!obj)
>> -        return 0;
>> -
>> -    log = i915_gem_object_pin_map(obj, I915_MAP_WC);
>> -    if (IS_ERR(log)) {
>> -        DRM_DEBUG("Failed to pin object\n");
>> -        seq_puts(m, "(log data unaccessible)\n");
>> -        return PTR_ERR(log);
>> -    }
>> -
>> -    for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
>> -        seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
>> -               *(log + i), *(log + i + 1),
>> -               *(log + i + 2), *(log + i + 3));
>> -
>> -    seq_putc(m, '\n');
>> -
>> -    i915_gem_object_unpin_map(obj);
>> -
>> -    return 0;
>> +    return intel_guc_log_dump(&guc->log, &p, dump_load_err);
>>   }
>>   static int i915_guc_log_level_get(void *data, u64 *val)
>> @@ -2302,10 +2204,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
>>       {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
>>       {"i915_gem_interrupt", i915_interrupt_info, 0},
>>       {"i915_guc_info", i915_guc_info, 0},
>> -    {"i915_guc_load_status", i915_guc_load_status_info, 0},
>>       {"i915_guc_log_dump", i915_guc_log_dump, 0},
>>       {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
>> -    {"i915_huc_load_status", i915_huc_load_status_info, 0},
>> +    {"i915_huc_info", i915_huc_info, 0},
>>       {"i915_frequency_info", i915_frequency_info, 0},
>>       {"i915_drpc_info", i915_drpc_info, 0},
>>       {"i915_ring_freq_table", i915_ring_freq_table, 0},
>>

