[PATCH v7 4/4] drm/xe/vf: Fixup CTB send buffer messages after migration
Lis, Tomasz
tomasz.lis at intel.com
Wed Apr 9 21:09:38 UTC 2025
On 08.04.2025 16:23, Michal Wajdeczko wrote:
> On 03.04.2025 20:40, Tomasz Lis wrote:
>> During post-migration recovery of a VF, it is necessary to update
>> GGTT references included in messages which are going to be sent
>> to GuC. GuC will start consuming messages only after the VF KMD
>> informs it that the fixups are done; before that, the VF KMD is
>> expected to update any H2G messages which are already in the send
>> buffer but were not yet consumed by GuC.
>>
>> Only a small subset of messages allowed for VFs have GGTT references
>> in them. This patch adds the functionality to parse the CTB send
>> ring buffer and shift addresses contained within.
>>
>> While fixing the CTB content, ct->lock is not taken. This means
>> the only lock held remains the GGTT address lock - which is fine,
>> because only requests with GGTT addresses matter, but it also means
>> tail changes can happen while the CTB fixups are executing (these
>> can be ignored, as any new messages will not have anything to fix).
>>
>> The GGTT address locking will be introduced in a future series.
>>
>> v2: removed storing shift as that's now done in VMA nodes patch;
>> macros to inlines; warns to asserts; log messages fixes (Michal)
>> v3: removed inline keywords, enums for offsets in CTB messages,
>> less error messages, if return unused then made functs void (Michal)
>> v4: update the cached head before starting fixups
>> v5: removed/updated comments, wrapped lines, converted assert into
>> error, enums for offsets to separate patch, reused xe_map_rd
>> v6: define xe_map_*_array() macros, support CTB wrap which divides
>> a message, updated comments, moved one function to an earlier patch
>>
>> Signed-off-by: Tomasz Lis<tomasz.lis at intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_guc_ct.c | 147 +++++++++++++++++++++++++++++++
>> drivers/gpu/drm/xe/xe_guc_ct.h | 2 +
>> drivers/gpu/drm/xe/xe_map.h | 12 +++
>> drivers/gpu/drm/xe/xe_sriov_vf.c | 18 ++++
>> 4 files changed, 179 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
>> index 686fe664c20d..add2d9b12345 100644
>> --- a/drivers/gpu/drm/xe/xe_guc_ct.c
>> +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
>> @@ -84,6 +84,8 @@ struct g2h_fence {
>> bool done;
>> };
>>
>> +#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
>> +
>> static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
>> {
>> g2h_fence->response_buffer = response_buffer;
>> @@ -1622,6 +1624,151 @@ static void g2h_worker_func(struct work_struct *w)
>> receive_g2h(ct);
>> }
>>
>> +static void xe_ring_map_fixup_u64(struct xe_device *xe, struct iosys_map *cmds,
>> + u32 size, u32 head, u32 pos, s64 shift)
>> +{
>> + u32 hi, lo;
>> + u64 offset;
>> +
>> + lo = xe_map_rd_array_u32(xe, cmds, (head + pos) % size);
>> + hi = xe_map_rd_array_u32(xe, cmds, (head + pos + 1) % size);
>> + offset = make_u64(hi, lo);
>> + offset += shift;
>> + lo = lower_32_bits(offset);
>> + hi = upper_32_bits(offset);
>> + xe_map_wr_array_u32(xe, cmds, (head + pos) % size, lo);
>> + xe_map_wr_array_u32(xe, cmds, (head + pos + 1) % size, hi);
>> +}
>> +
>> +/*
>> + * Shift any GGTT addresses within a single message left within CTB from
>> + * before post-migration recovery.
>> + * @ct: pointer to CT struct of the target GuC
>> + * @cmds: iomap buffer containing CT messages
>> + * @head: start of the target message within the buffer
>> + * @len: length of the target message
>> + * @size: size of the commands buffer
>> + * @shift: the address shift to be added to each GGTT reference
>> + */
>> +static void ct_update_addresses_in_message(struct xe_guc_ct *ct,
> s/ct_update_addresses_in_message/ct_fixup_ggtt_in_message
ok
>> + struct iosys_map *cmds, u32 head,
>> + u32 len, u32 size, s64 shift)
>> +{
>> + struct xe_device *xe = ct_to_xe(ct);
>> + u32 action, i, n;
>> + u32 msg[1];
> u32 msg;
> or
> u32 msg[GUC_HXG_MSG_MIN_LEN];
I want an array, so will go with 2nd. Though this looks like unjustified
obfuscation to me.
>> +
>> + xe_map_memcpy_from(xe, msg, cmds, head * sizeof(u32),
>> + 1 * sizeof(u32));
> please use helpers that you already have:
>
> msg[0] = xe_map_rd_array_u32(xe, cmds, head);
ok
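i.e. the agreed change would be roughly:

	u32 msg[GUC_HXG_MSG_MIN_LEN];

	msg[0] = xe_map_rd_array_u32(xe, cmds, head);
	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);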
>> + action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
>> + switch (action) {
>> + case XE_GUC_ACTION_REGISTER_CONTEXT:
> maybe don't super-optimize by hand and just do those 3x fixups here,
> using related XE_GUC_REGISTER_CONTEXT_DATA_xxx definitions?
>
> it looks weird to have 2x "case" statements and then "if (case)" anyway,
> plus mixed enums that just accidentally points to the same offsets
Will separate the cases. The optimizing compiler will merge them anyway;
I used a single case to avoid introducing more barely used enums
(the ones we introduced in the last round).
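For reference, a rough sketch of what the separated cases could look like
(the single-LRC XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER and
_DATA_7_WQ_BUF_BASE_LOWER names are assumed to exist alongside the
_DATA_10_HW_LRC_ADDR one from the earlier enums patch):

	switch (action) {
	case XE_GUC_ACTION_REGISTER_CONTEXT:
		xe_ring_map_fixup_u64(xe, cmds, size, head,
				      XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER, shift);
		xe_ring_map_fixup_u64(xe, cmds, size, head,
				      XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER, shift);
		xe_ring_map_fixup_u64(xe, cmds, size, head,
				      XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
		break;
	case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
		xe_ring_map_fixup_u64(xe, cmds, size, head,
				      XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER, shift);
		xe_ring_map_fixup_u64(xe, cmds, size, head,
				      XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER, shift);
		n = xe_map_rd_array_u32(xe, cmds, head +
					XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS);
		for (i = 0; i < n; i++)
			xe_ring_map_fixup_u64(xe, cmds, size, head,
					      XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR + 2 * i,
					      shift);
		break;
	default:
		break;
	}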
>> + case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
>> + xe_ring_map_fixup_u64(xe, cmds, size, head,
>> + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
>> + shift);
>> + xe_ring_map_fixup_u64(xe, cmds, size, head,
>> + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
>> + shift);
>> + if (action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC) {
>> + n = xe_map_rd_array_u32(xe, cmds, head +
>> + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS);
>> + for (i = 0; i < n; i++)
>> + xe_ring_map_fixup_u64(xe, cmds, size, head,
>> + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
>> + + 2 * i, shift);
>> + } else {
>> + xe_ring_map_fixup_u64(xe, cmds, size, head,
>> + XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
>> + }
>> + break;
>> + default:
>> + break;
>> + }
>> +}
>> +
>> +static int ct_update_addresses_in_buffer(struct xe_guc_ct *ct,
> s/ct_update_addresses_in_buffer/ct_fixup_ggtt_in_buffer
ok
>> + struct guc_ctb *h2g,
> nit: this is redundant
Oh I see, by "this" you mean a spaced out separate line.
>> + s64 shift, u32 *mhead, s32 avail)
> can you describe what is this mhead?
This is a static function, but ok, I will describe its parameters.
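e.g. a possible description, based on how the function is used below
(mhead being the in/out cached head):

	/*
	 * Fixup GGTT references in a single H2G message and move past it.
	 * @ct: pointer to CT struct of the target GuC
	 * @h2g: the H2G CTB containing pending messages
	 * @shift: the address shift to be added to each GGTT reference
	 * @mhead: pointer to the cached head; updated to point past the parsed message
	 * @avail: number of dwords available between the cached head and the tail
	 *
	 * Return: number of dwords still available after the parsed message.
	 */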
>> +{
>> + struct xe_device *xe = ct_to_xe(ct);
>> + u32 head = *mhead;
>> + u32 size = h2g->info.size;
>> + u32 msg[1];
> u32 msg;
> or
> u32 msg[GUC_CTB_MSG_MIN_LEN];
>
>> + u32 len;
> please try to order vars in rev-xmas-tree
ok
>> +
>> + /* Read header */
> you should at least assert that avail > 0 before reading even single u32
>
> xe_gt_assert(gt, avail >= GUC_CTB_MSG_MIN_LEN);
This is a static function, and the caller is ensuring that. But sure, I
can add the assert.
> but since it is called in the loop, more appropriate would be:
>
> if (avail < GUC_CTB_MSG_MIN_LEN)
> goto broken;
I don't know what you mean by that. The caller only invokes this while
avail > 0, and there are not that many possibilities for an integer value
greater than 0 but smaller than 1. With the changes I agreed to above and
below, we've already ensured that GUC_CTB_MSG_MIN_LEN can only be equal to 1.
>> + xe_map_memcpy_from(xe, msg, &h2g->cmds, sizeof(u32) * head,
>> + sizeof(u32));
> msg[0] = xe_map_rd_array_u32(xe, cmds, head);
ok
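So the header read, with the assert agreed above, would look roughly like:

	xe_gt_assert(ct_to_gt(ct), avail >= GUC_CTB_MSG_MIN_LEN);

	msg[0] = xe_map_rd_array_u32(xe, &h2g->cmds, head);
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;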
>> + len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
>> +
>> + if (unlikely(len > (u32)avail)) {
>> + xe_gt_err(ct_to_gt(ct), "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
>> + avail, len);
>> + return 0;
>> + }
>> +
>> + head = (head + 1) % size;
> maybe don't update head here as you need to reverse it two lines below?
That value is never changed again.
> and this magic "1" is GUC_CTB_MSG_MIN_LEN
ok
>> + ct_update_addresses_in_message(ct, &h2g->cmds, head, len - 1, size, shift);
> msg_len_to_hxg_len() instead of "len - 1" ?
Wow, we have a helper even for decrementing the length? Ok, will use.
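Assuming msg_len_to_hxg_len() simply strips the GUC_CTB_MSG_MIN_LEN header
dword, the call would then read:

	ct_fixup_ggtt_in_message(ct, &h2g->cmds, head,
				 msg_len_to_hxg_len(len), size, shift);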
>> + *mhead = (head + len - 1) % size;
> maybe it's time to introduce
>
> u32 move_head(u32 head, u32 step, u32 size)
We are overdefining a lot of things already. I don't think we should
press even harder in that direction.
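(For the record, such a helper would presumably be little more than a
modular advance:)

	static u32 move_head(u32 head, u32 step, u32 size)
	{
		return (head + step) % size;
	}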
>> +
>> + return avail - len;
>> +}
>> +
>> +/**
>> + * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
>> + * @ct: pointer to CT struct of the target GuC
>> + * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
>> + *
>> + * Messages in guc-to-host CTB are owned by GuC and any fixups in them
>> + * are made by GuC. But content of the host-to-guc CTB is owned by the
>> + * KMD, so fixups to GGTT references in any pending messages need to be
>> + * applied here.
> s/guc-to-host/H2G
>
> like you have earlier and below
Earlier is a short description. Below I am using the shortcut after
defining it in this line. This is how long comments should look:
abbreviations should be defined first.
>> + * This function updates GGTT offsets in payloads of pending H2G CTB
>> + * messages (messages which were not consumed by GuC before the VF got
>> + * paused).
>> + */
>> +void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
>> +{
>> + struct xe_guc *guc = ct_to_guc(ct);
>> + struct xe_gt *gt = guc_to_gt(guc);
>> + struct guc_ctb *h2g = &ct->ctbs.h2g;
>> + u32 head, tail, size;
>> + s32 avail;
> early exit if shift == 0 ?
There is no need for such optimizations.
-Tomasz
>> +
>> + if (unlikely(h2g->info.broken))
>> + return;
>> +
>> + h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
>> + head = h2g->info.head;
>> + tail = READ_ONCE(h2g->info.tail);
>> + size = h2g->info.size;
>> +
>> + if (unlikely(head > size))
>> + goto corrupted;
>> +
>> + if (unlikely(tail >= size))
>> + goto corrupted;
>> +
>> + avail = tail - head;
>> +
>> + /* beware of buffer wrap case */
>> + if (unlikely(avail < 0))
>> + avail += size;
>> + xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
>> + xe_gt_assert(gt, avail >= 0);
>> +
>> + while (avail > 0)
>> + avail = ct_update_addresses_in_buffer(ct, h2g, ggtt_shift, &head, avail);
>> +
>> + return;
>> +
>> +corrupted:
>> + xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
>> + head, tail, size);
>> + h2g->info.broken = true;
>> +}
>> +
>> static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
>> bool want_ctb)
>> {
>> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
>> index 82c4ae458dda..5649bda82823 100644
>> --- a/drivers/gpu/drm/xe/xe_guc_ct.h
>> +++ b/drivers/gpu/drm/xe/xe_guc_ct.h
>> @@ -22,6 +22,8 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
>> void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
>> void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
>>
>> +void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift);
>> +
>> static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
>> {
>> return ct->state == XE_GUC_CT_STATE_ENABLED;
>> diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
>> index f62e0c8b67ab..db98c8fb121f 100644
>> --- a/drivers/gpu/drm/xe/xe_map.h
>> +++ b/drivers/gpu/drm/xe/xe_map.h
>> @@ -78,6 +78,18 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
>> iosys_map_wr(map__, offset__, type__, val__); \
>> })
>>
>> +#define xe_map_rd_array(xe__, map__, index__, type__) \
>> + xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)
>> +
>> +#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
>> + xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)
>> +
>> +#define xe_map_rd_array_u32(xe__, map__, index__) \
>> + xe_map_rd_array(xe__, map__, index__, u32)
>> +
>> +#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
>> + xe_map_wr_array(xe__, map__, index__, u32, val__)
>> +
>> #define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
>> struct xe_device *__xe = xe__; \
>> xe_device_assert_mem_access(__xe); \
>> diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
>> index e70f1ceabbb3..2674fa948fda 100644
>> --- a/drivers/gpu/drm/xe/xe_sriov_vf.c
>> +++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
>> @@ -10,6 +10,7 @@
>> #include "xe_gt.h"
>> #include "xe_gt_sriov_printk.h"
>> #include "xe_gt_sriov_vf.h"
>> +#include "xe_guc_ct.h"
>> #include "xe_pm.h"
>> #include "xe_sriov.h"
>> #include "xe_sriov_printk.h"
>> @@ -158,6 +159,20 @@ static int vf_post_migration_requery_guc(struct xe_device *xe)
>> return ret;
>> }
>>
>> +static void vf_post_migration_fixup_ctb(struct xe_device *xe)
>> +{
>> + struct xe_gt *gt;
>> + unsigned int id;
>> +
>> + xe_assert(xe, IS_SRIOV_VF(xe));
>> +
>> + for_each_gt(gt, xe, id) {
>> + s32 shift = xe_gt_sriov_vf_ggtt_shift(gt);
>> +
>> + xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
>> + }
>> +}
>> +
>> /*
>> * vf_post_migration_imminent - Check if post-restore recovery is coming.
>> * @xe: the &xe_device struct instance
>> @@ -224,6 +239,9 @@ static void vf_post_migration_recovery(struct xe_device *xe)
>>
>> need_fixups = vf_post_migration_fixup_ggtt_nodes(xe);
>> /* FIXME: add the recovery steps */
>> + if (need_fixups)
>> + vf_post_migration_fixup_ctb(xe);
>> +
>> vf_post_migration_notify_resfix_done(xe);
>> xe_pm_runtime_put(xe);
>> drm_notice(&xe->drm, "migration recovery ended\n");