[PATCH v2 2/4] drm/xe: Invert page fault queue head / tail
Lucas De Marchi
lucas.demarchi at intel.com
Wed Jan 10 20:28:11 UTC 2024
On Tue, Jan 09, 2024 at 05:24:37PM -0800, Matthew Brost wrote:
>Convention for queues in Linux is the producer moves the head and
>consumer moves the tail. Fix the page fault queue to conform to this
>convention.
>
>Cc: Lucas De Marchi <lucas.demarchi at intel.com>
>Signed-off-by: Matthew Brost <matthew.brost at intel.com>
it's a dangerous change if not automated or checked, so I decided to
compare before and after. By comparing the output object, the only
change I see is in xe_gt_pagefault_reset(), and that is because of the
order of the tail and head assignments to zero. With this additional
hunk:
@@ -441,8 +441,8 @@ void xe_gt_pagefault_reset(struct xe_gt *gt)
for (i = 0; i < NUM_PF_QUEUE; ++i) {
	spin_lock_irq(&gt->usm.pf_queue[i].lock);
- gt->usm.pf_queue[i].head = 0;
gt->usm.pf_queue[i].tail = 0;
+ gt->usm.pf_queue[i].head = 0;
	spin_unlock_irq(&gt->usm.pf_queue[i].lock);
}
the .s generated matches 100%, but no need to change this.
Reviewed-by: Lucas De Marchi <lucas.demarchi at intel.com>
thanks
Lucas De Marchi
>---
> drivers/gpu/drm/xe/xe_gt_pagefault.c | 14 +++++++-------
> drivers/gpu/drm/xe/xe_gt_types.h | 12 ++++++------
> 2 files changed, 13 insertions(+), 13 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
>index 0a61e4413679..3ca715e2ec19 100644
>--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
>+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
>@@ -282,9 +282,9 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
> bool ret = false;
>
> spin_lock_irq(&pf_queue->lock);
>- if (pf_queue->head != pf_queue->tail) {
>+ if (pf_queue->tail != pf_queue->head) {
> desc = (const struct xe_guc_pagefault_desc *)
>- (pf_queue->data + pf_queue->head);
>+ (pf_queue->data + pf_queue->tail);
>
> pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
> pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
>@@ -302,7 +302,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
> pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
> PFD_VIRTUAL_ADDR_LO_SHIFT;
>
>- pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) %
>+ pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
> PF_QUEUE_NUM_DW;
> ret = true;
> }
>@@ -315,7 +315,7 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
> {
> lockdep_assert_held(&pf_queue->lock);
>
>- return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <=
>+ return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
> PF_MSG_LEN_DW;
> }
>
>@@ -342,8 +342,8 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
> spin_lock_irqsave(&pf_queue->lock, flags);
> full = pf_queue_full(pf_queue);
> if (!full) {
>- memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32));
>- pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
>+ memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
>+ pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
> queue_work(gt->usm.pf_wq, &pf_queue->worker);
> } else {
> drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
>@@ -389,7 +389,7 @@ static void pf_queue_work_func(struct work_struct *w)
> 		send_pagefault_reply(&gt->uc.guc, &reply);
>
> if (time_after(jiffies, threshold) &&
>- pf_queue->head != pf_queue->tail) {
>+ pf_queue->tail != pf_queue->head) {
> queue_work(gt->usm.pf_wq, w);
> break;
> }
>diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
>index f74684660475..b15503dabba4 100644
>--- a/drivers/gpu/drm/xe/xe_gt_types.h
>+++ b/drivers/gpu/drm/xe/xe_gt_types.h
>@@ -225,16 +225,16 @@ struct xe_gt {
> #define PF_QUEUE_NUM_DW 128
> /** @data: data in the page fault queue */
> u32 data[PF_QUEUE_NUM_DW];
>- /**
>- * @head: head pointer in DWs for page fault queue,
>- * moved by worker which processes faults.
>- */
>- u16 head;
> /**
> * @tail: tail pointer in DWs for page fault queue,
>- * moved by G2H handler.
>+ * moved by worker which processes faults (consumer).
> */
> u16 tail;
>+ /**
>+ * @head: head pointer in DWs for page fault queue,
>+ * moved by G2H handler (producer).
>+ */
>+ u16 head;
> /** @lock: protects page fault queue */
> spinlock_t lock;
> /** @worker: to process page faults */
>--
>2.34.1
>
More information about the Intel-xe
mailing list