[Intel-xe] [PATCH] drm/xe: Move VM entries print to a trace

Welty, Brian brian.welty at intel.com
Fri Oct 20 20:21:33 UTC 2023



On 10/20/2023 12:10 PM, Summers, Stuart wrote:
> On Fri, 2023-10-20 at 10:44 -0700, Welty, Brian wrote:
>> On 10/20/2023 10:21 AM, Stuart Summers wrote:
>>> While this information can be useful for debug, it is a little
>>> verbose to dump during a CI run, resulting in test failures due
>>> to disk limit issues in the CI machines.
>>
>> But why does CI have this enabled?
>>
>> The description for DRM_XE_DEBUG_VM says:  "Recommended for driver
>> developers only."   I don't think it belongs turned on for CI.
> 
> Ok, that's a good point. Other than disrupting the status quo, is the
> worry with tracing that we'll have a huge range of binds and overflow
> the logs there, whereas dmesg storage is a little easier to maintain
> through syslog?

Just thought it was worth asking... as in, is this an actual problem or
not?  Or does CI just need to disable this in Kconfig, or, as you say,
constrain how much logging it keeps on disk?
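
For what it's worth, my recollection (treat this as an assumption about
xe_vm.h rather than verbatim code) is that vm_dbg() only maps to
drm_dbg() when that Kconfig is set, roughly:

    /*
     * Sketch from memory, not verbatim: vm_dbg() compiles away entirely
     * without CONFIG_DRM_XE_DEBUG_VM, so none of these prints would ever
     * reach dmesg on such a kernel.
     */
    #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
    #define vm_dbg drm_dbg
    #else
    __printf(2, 3)
    static inline void vm_dbg(const struct drm_device *dev,
                              const char *format, ...)
    { /* noop */ }
    #endif

So a CI kernel built without DRM_XE_DEBUG_VM shouldn't be producing this
output in the first place.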

Certainly, whether this belongs in dmesg or should be converted to
tracepoints is a discussion worth having...
But for that, it's best to involve the folks who have made the most use
of this during real-world debug...  I'll defer to others on that and
let the other thread discuss it!
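
For anyone doing that manual debug, here's a rough sketch of pulling the
proposed events out of tracefs from userspace (my own illustration, not
part of the patch; it assumes tracefs is mounted at /sys/kernel/tracing
and that the events land in an "xe" group under the names from the patch;
adjust the paths if your setup differs):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a short string to a tracefs control file (an "enable" knob). */
    static void write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return;
            if (write(fd, val, strlen(val)) < 0)
                    perror(path);
            close(fd);
    }

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd;

            /* Enable the two events added by the patch (paths assumed). */
            write_str("/sys/kernel/tracing/events/xe/xe_pt_commit_bind/enable", "1");
            write_str("/sys/kernel/tracing/events/xe/xe_pt_commit_unbind/enable", "1");

            /* Stream formatted records as binds/unbinds are committed. */
            fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
            if (fd < 0)
                    return 1;
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);
            close(fd);
            return 0;
    }

And to the disk-limit concern above: the trace ring buffer is a
fixed-size, in-memory buffer, so nothing ends up on disk unless someone
explicitly captures it.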

-Brian



> 
> Thanks,
> Stuart
> 
>>
>>
>>>
>>> For now, move these debug prints to a trace for manual debug.
>>>
>>> Signed-off-by: Stuart Summers <stuart.summers at intel.com>
>>> ---
>>>    drivers/gpu/drm/xe/xe_pt.c    | 35 +++++-------------------------
>>>    drivers/gpu/drm/xe/xe_pt.h    |  8 +++++++
>>>    drivers/gpu/drm/xe/xe_trace.h | 41 +++++++++++++++++++++++++++++++++++
>>>    3 files changed, 54 insertions(+), 30 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
>>> index 31afab617b4e..eb905878c4cb 100644
>>> --- a/drivers/gpu/drm/xe/xe_pt.c
>>> +++ b/drivers/gpu/drm/xe/xe_pt.c
>>> @@ -908,6 +908,9 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
>>>    
>>>                          pt_dir->dir.entries[j_] = &newpte->base;
>>>                  }
>>> +
>>> +               trace_xe_pt_commit_bind(&entries[i]);
>>> +
>>>                  kfree(entries[i].pt_entries);
>>>          }
>>>    }
>>> @@ -929,34 +932,6 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
>>>          return err;
>>>    }
>>>    
>>> -static void xe_vm_dbg_print_entries(struct xe_device *xe,
>>> -                                   const struct xe_vm_pgtable_update *entries,
>>> -                                   unsigned int num_entries)
>>> -#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
>>> -{
>>> -       unsigned int i;
>>> -
>>> -       vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
>>> -       for (i = 0; i < num_entries; i++) {
>>> -               const struct xe_vm_pgtable_update *entry = &entries[i];
>>> -               struct xe_pt *xe_pt = entry->pt;
>>> -               u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
>>> -               u64 end;
>>> -               u64 start;
>>> -
>>> -               xe_assert(xe, !entry->pt->is_compact);
>>> -               start = entry->ofs * page_size;
>>> -               end = start + page_size * entry->qwords;
>>> -               vm_dbg(&xe->drm,
>>> -                      "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
>>> -                      i, xe_pt->level, entry->ofs, entry->qwords,
>>> -                      xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
>>> -       }
>>> -}
>>> -#else
>>> -{}
>>> -#endif
>>> -
>>>    #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
>>>    
>>>    static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
>>> @@ -1276,7 +1251,6 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
>>>                  goto err;
>>>          xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
>>>    
>>> -       xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
>>>          xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
>>>                                     num_entries);
>>>    
>>> @@ -1563,6 +1537,8 @@ xe_pt_commit_unbind(struct xe_vma *vma,
>>>                                  pt_dir->dir.entries[i] = NULL;
>>>                          }
>>>                  }
>>> +
>>> +               trace_xe_pt_commit_unbind(entry);
>>>          }
>>>    }
>>>    
>>> @@ -1627,7 +1603,6 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
>>>          num_entries = xe_pt_stage_unbind(tile, vma, entries);
>>>          xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
>>>    
>>> -       xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
>>>          xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
>>>                                     num_entries);
>>>    
>>> diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
>>> index d5460e58dbbf..c7e5f7111227 100644
>>> --- a/drivers/gpu/drm/xe/xe_pt.h
>>> +++ b/drivers/gpu/drm/xe/xe_pt.h
>>> @@ -18,6 +18,14 @@ struct xe_tile;
>>>    struct xe_vm;
>>>    struct xe_vma;
>>>    
>>> +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
>>> +#define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
>>> +#define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
>>> +#else
>>> +#define xe_pt_set_addr(__xe_pt, __addr)
>>> +#define xe_pt_addr(__xe_pt) 0ull
>>> +#endif
>>> +
>>>    #define xe_pt_write(xe, map, idx, data) \
>>>          xe_map_wr(xe, map, (idx) * sizeof(u64), u64, data)
>>>    
>>> diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
>>> index e32f1cad51d9..24d898061d04 100644
>>> --- a/drivers/gpu/drm/xe/xe_trace.h
>>> +++ b/drivers/gpu/drm/xe/xe_trace.h
>>> @@ -17,8 +17,49 @@
>>>    #include "xe_gt_tlb_invalidation_types.h"
>>>    #include "xe_gt_types.h"
>>>    #include "xe_guc_exec_queue_types.h"
>>> +#include "xe_pt_types.h"
>>>    #include "xe_sched_job.h"
>>>    #include "xe_vm.h"
>>> +#include "xe_pt.h"
>>> +
>>> +DECLARE_EVENT_CLASS(xe_vm_pgtable_update,
>>> +                   TP_PROTO(struct xe_vm_pgtable_update *update),
>>> +                   TP_ARGS(update),
>>> +
>>> +                   TP_STRUCT__entry(
>>> +                            __field(u64, page_size)
>>> +                            __field(u64, end)
>>> +                            __field(u64, start)
>>> +                            __field(u64, pt_addr)
>>> +                            __field(unsigned int, level)
>>> +                            __field(u32, ofs)
>>> +                            __field(u32, qwords)
>>> +                            ),
>>> +
>>> +                   TP_fast_assign(
>>> +                          __entry->level = xe_pt_shift(update->pt->level);
>>> +                          __entry->page_size = 1ull << __entry->level;
>>> +                          __entry->pt_addr = xe_pt_addr(update->pt);
>>> +                          __entry->ofs = update->ofs;
>>> +                          __entry->qwords = update->qwords;
>>> +                          __entry->start = __entry->pt_addr + update->ofs * __entry->page_size;
>>> +                          __entry->end = __entry->start + __entry->page_size * update->qwords;
>>> +                          ),
>>> +
>>> +                   TP_printk("Update level %u at (%u + %u) [%llx...%llx]",
>>> +                             __entry->level, __entry->ofs, __entry->qwords,
>>> +                             __entry->start, __entry->end)
>>> +);
>>> +
>>> +DEFINE_EVENT(xe_vm_pgtable_update, xe_pt_commit_bind,
>>> +            TP_PROTO(struct xe_vm_pgtable_update *entry),
>>> +            TP_ARGS(entry)
>>> +);
>>> +
>>> +DEFINE_EVENT(xe_vm_pgtable_update, xe_pt_commit_unbind,
>>> +            TP_PROTO(struct xe_vm_pgtable_update *entry),
>>> +            TP_ARGS(entry)
>>> +);
>>>    
>>>    DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
>>>                      TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
> 

