[PATCH v2 16/29] drm/xe: Add migrate layer functions for SVM support
Thomas Hellström
thomas.hellstrom at linux.intel.com
Tue Nov 19 16:45:27 UTC 2024
On Tue, 2024-10-15 at 20:25 -0700, Matthew Brost wrote:
> Add functions which migrate to / from VRAM accepting a single DPA
> argument (VRAM) and an array of dma addresses (SRAM).
>
> v2:
> - Don't unlock job_mutex in error path of xe_migrate_vram
>
> Signed-off-by: Oak Zeng <oak.zeng at intel.com>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/xe/xe_migrate.c | 149 ++++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_migrate.h | 10 +++
> 2 files changed, 159 insertions(+)
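
Before diving in: to make sure I read the interface right, here is roughly
how I'd expect a caller (e.g. an SVM migration path) to use these. All the
names in this sketch (tile, dma_addrs, vram_dpa, npages) are made up for
illustration, and error / fence handling is stripped to the essentials:

	struct dma_fence *fence;

	/* dma_addrs: one DMA-mapped system page per entry, npages entries.
	 * vram_dpa: device physical address of a contiguous VRAM chunk.
	 */
	fence = xe_migrate_to_vram(tile->migrate, npages, dma_addrs, vram_dpa);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

If that matches the intent, please capture it in the kerneldoc (more on
that below).
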
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index cfd31ae49cc1..d7b6636286ae 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -1542,6 +1542,155 @@ void xe_migrate_wait(struct xe_migrate *m)
> dma_fence_wait(m->fence, false);
> }
>
> +static u32 pte_update_cmd_size(u64 size)
> +{
> +	u32 dword;

dwords or num_dword?

> +	u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE);
> +
> +	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
> +	/*
> +	 * MI_STORE_DATA_IMM command is used to update page table. Each
> +	 * instruction can update at most 0x1ff PTE entries. To update
> +	 * n (n <= 0x1ff) PTE entries, we need:
> +	 * 1 dword for the MI_STORE_DATA_IMM command header (opcode etc)
> +	 * 2 dwords for the page table's physical location
> +	 * 2*n dwords for the PTE values to fill (each PTE entry is 2 dwords)
> +	 */
> +	dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff);
> +	dword += entries * 2;
> +
> +	return dword;
> +}
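
Sanity-checking the dword math above: a 2MiB update with 4KiB GPU pages
gives entries = 512, so this emits DIV_ROUND_UP(512, 0x1ff) = 2 headers
at (1 + 2) dwords each, plus 512 * 2 data dwords, i.e. 6 + 1024 = 1030
dwords total. Restating the same arithmetic as a standalone helper
(illustration only, not a suggestion to add it):

	/* Mirrors pte_update_cmd_size(): 3 dwords of header + offset per
	 * MI_STORE_DATA_IMM, each instruction covering up to 0x1ff qword
	 * PTEs.
	 */
	static u32 expected_dword_count(u64 size)
	{
		u64 entries = DIV_ROUND_UP(size, SZ_4K);

		return (1 + 2) * DIV_ROUND_UP(entries, 0x1ff) + entries * 2;
	}
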
> +
> +static void build_pt_update_batch_sram(struct xe_migrate *m,
> +				       struct xe_bb *bb, u32 pt_offset,
> +				       dma_addr_t *sram_addr, u32 size)
> +{
> +	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
> +	u32 ptes;
> +	int i = 0;
> +
> +	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
> +	while (ptes) {
> +		u32 chunk = min(0x1ffU, ptes);
> +
> +		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
> +		bb->cs[bb->len++] = pt_offset;
> +		bb->cs[bb->len++] = 0;
> +
> +		pt_offset += chunk * 8;
> +		ptes -= chunk;
> +
> +		while (chunk--) {
> +			u64 addr = sram_addr[i++] & PAGE_MASK;
> +
> +			xe_tile_assert(m->tile, addr);
> +			addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
> +								 addr, pat_index,
> +								 0, false, 0);
> +			bb->cs[bb->len++] = lower_32_bits(addr);
> +			bb->cs[bb->len++] = upper_32_bits(addr);
> +		}
> +	}
> +}
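
For my own reference while reviewing, the stream this builds per chunk is,
as I read it:

	dw0:	MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk)
	dw1:	pt_offset		/* PT write offset, low 32 bits */
	dw2:	0			/* high 32 bits */
	then per PTE:
	dwN:	lower_32_bits(pte)
	dwN+1:	upper_32_bits(pte)

which is the (1 + 2) + 2 * n layout that pte_update_cmd_size() accounts
for. Might be worth a one-line comment that pt_offset advances by
chunk * 8 because each PTE is a qword.
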
> +
> +enum xe_migrate_copy_dir {
> + XE_MIGRATE_COPY_TO_VRAM,
> + XE_MIGRATE_COPY_TO_SRAM,
> +};
> +
> +static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
> +					 unsigned long npages,
> +					 dma_addr_t *sram_addr, u64 vram_addr,
> +					 const enum xe_migrate_copy_dir dir)
> +{
> +	struct xe_gt *gt = m->tile->primary_gt;
> +	struct xe_device *xe = gt_to_xe(gt);
> +	struct dma_fence *fence = NULL;
> +	u32 batch_size = 2;
> +	u64 src_L0_ofs, dst_L0_ofs;
> +	u64 round_update_size;
> +	struct xe_sched_job *job;
> +	struct xe_bb *bb;
> +	u32 update_idx, pt_slot = 0;
> +	int err;
> +
> +	round_update_size = min_t(u64, npages * PAGE_SIZE,
> +				  MAX_PREEMPTDISABLE_TRANSFER);
Hm. How does the caller know how many pages were actually migrated?
> +	batch_size += pte_update_cmd_size(round_update_size);
> +	batch_size += EMIT_COPY_DW;
> +
> +	bb = xe_bb_new(gt, batch_size, true);
> +	if (IS_ERR(bb)) {
> +		err = PTR_ERR(bb);
> +		return ERR_PTR(err);
> +	}
> +
> +	build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
> +				   sram_addr, round_update_size);
> +
> +	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
> +		src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
> +		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
> +
> +	} else {
> +		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
> +		dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
> +	}
> +
> +	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
> +	update_idx = bb->len;
> +
> +	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size,
> +		  XE_PAGE_SIZE);
> +
> +	job = xe_bb_create_migration_job(m->q, bb,
> +					 xe_migrate_batch_base(m, true),
> +					 update_idx);
> +	if (IS_ERR(job)) {
> +		err = PTR_ERR(job);
> +		goto err;
> +	}
> +
> +	xe_sched_job_add_migrate_flush(job, 0);
> +
> +	mutex_lock(&m->job_mutex);
> +	xe_sched_job_arm(job);
> +	fence = dma_fence_get(&job->drm.s_fence->finished);
> +	xe_sched_job_push(job);
> +
> +	dma_fence_put(m->fence);
> +	m->fence = dma_fence_get(fence);
> +	mutex_unlock(&m->job_mutex);
> +
> +	xe_bb_free(bb, fence);
> +
> +	return fence;
> +
> +err:
> +	xe_bb_free(bb, NULL);
> +
> +	return ERR_PTR(err);
> +}
> +
> +struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
> +				     unsigned long npages,
> +				     dma_addr_t *src_addr,
> +				     u64 dst_addr)
Kerneldoc.
> +{
> +	return xe_migrate_vram(m, npages, src_addr, dst_addr,
> +			       XE_MIGRATE_COPY_TO_VRAM);
> +}
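
For the kerneldoc requested above, a possible starting point (wording is
only a suggestion, based on my reading of the code):

	/**
	 * xe_migrate_to_vram() - Migrate to VRAM
	 * @m: The migration context.
	 * @npages: Number of pages to migrate.
	 * @src_addr: Array of @npages DMA addresses, one per source page.
	 * @dst_addr: Device physical address of VRAM to copy to.
	 *
	 * Copy from an array of DMA-mapped system pages to a contiguous
	 * VRAM device physical address.
	 *
	 * Return: dma fence to wait on for the copy to complete, or an
	 * ERR_PTR on failure.
	 */
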
> +
> +struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
> +				       unsigned long npages,
> +				       u64 src_addr,
> +				       dma_addr_t *dst_addr)
Kerneldoc.
> +{
> +	return xe_migrate_vram(m, npages, dst_addr, src_addr,
> +			       XE_MIGRATE_COPY_TO_SRAM);
> +}
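
And the mirror image here, e.g.:

	/**
	 * xe_migrate_from_vram() - Migrate from VRAM
	 * @m: The migration context.
	 * @npages: Number of pages to migrate.
	 * @src_addr: Device physical address of VRAM to copy from.
	 * @dst_addr: Array of @npages DMA addresses, one per destination page.
	 *
	 * Copy from a contiguous VRAM device physical address to an array of
	 * DMA-mapped system pages.
	 *
	 * Return: dma fence to wait on for the copy to complete, or an
	 * ERR_PTR on failure.
	 */
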
> +
> #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
> #include "tests/xe_migrate.c"
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
> index 0109866e398a..6ff9a963425c 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.h
> +++ b/drivers/gpu/drm/xe/xe_migrate.h
> @@ -95,6 +95,16 @@ struct xe_migrate_pt_update {
>
> struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
>
> +struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
> +				     unsigned long npages,
> +				     dma_addr_t *src_addr,
> +				     u64 dst_addr);
> +
> +struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
> +				       unsigned long npages,
> +				       u64 src_addr,
> +				       dma_addr_t *dst_addr);
> +
> struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
> 				  struct xe_bo *src_bo,
> 				  struct xe_bo *dst_bo,