[PATCH v6 6/9] drm/xe/xe2: Update emit_pte to use compression enabled PAT index

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Fri Dec 8 05:06:04 UTC 2023


On 07-12-2023 18:17, Thomas Hellström wrote:
>
> On 12/7/23 10:19, Himal Prasad Ghimiray wrote:
>> For indirectly accessed buffers, use the compression-enabled PAT index.
>>
>> v2:
>>   - Fix parameter name.
>>
>> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
>> Cc: Matthew Auld <matthew.auld at intel.com>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> ---
>>   drivers/gpu/drm/xe/tests/xe_migrate.c |  2 +-
>>   drivers/gpu/drm/xe/xe_migrate.c       | 20 ++++++++++++++------
>>   2 files changed, 15 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
>> index 83d6a66ed369..f77477f7e9fa 100644
>> --- a/drivers/gpu/drm/xe/tests/xe_migrate.c
>> +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
>> @@ -330,7 +330,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
>>       else
>>           xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
>>
>> -    emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
>> +    emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
>>            &src_it, XE_PAGE_SIZE, pt);
>>
>>       run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
>> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
>> index 98dca906a023..1bfb249680f4 100644
>> --- a/drivers/gpu/drm/xe/xe_migrate.c
>> +++ b/drivers/gpu/drm/xe/xe_migrate.c
>> @@ -416,15 +416,23 @@ static u32 pte_update_size(struct xe_migrate *m,
>>
>>   static void emit_pte(struct xe_migrate *m,
>>                struct xe_bb *bb, u32 at_pt,
>> -             bool is_vram,
>> +             bool is_vram, bool is_comp_pte,
>>                struct xe_res_cursor *cur,
>>                u32 size, struct xe_bo *bo)
>>   {
>> -    u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
>> +    struct xe_device *xe = tile_to_xe(m->tile);
>> +
>> +    u16 pat_index;
>>       u32 ptes;
>>       u64 ofs = at_pt * XE_PAGE_SIZE;
>>       u64 cur_ofs;
>>
>> +    /* Indirect access needs compression enabled uncached PAT index */
>> +    if (GRAPHICS_VERx100(xe) >= 2000)
>> +        pat_index = is_comp_pte ? 12 : xe->pat.idx[XE_CACHE_NONE];
>
> Please use a relevant define instead of "12".
Sure.
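
Something like the below is what I have in mind (the define name and
its location are placeholders here, not final; only the value 12
comes from this patch -- it is the Xe2 compression-enabled, uncached
PAT table entry):

     /* e.g. in xe_pat.h; illustrative name, not final */
     #define XE2_COMP_EN_UC_PAT_INDEX 12

and then in emit_pte():

     if (GRAPHICS_VERx100(xe) >= 2000)
         pat_index = is_comp_pte ? XE2_COMP_EN_UC_PAT_INDEX :
                     xe->pat.idx[XE_CACHE_NONE];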
>
>> +    else
>> +        pat_index = xe->pat.idx[XE_CACHE_WB];
>> +
>>       /*
>>        * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently
>>        * we're only emitting VRAM PTEs during sanity tests, so when
>> @@ -722,19 +730,19 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
>>           }
>>
>>           if (!src_is_vram)
>> -            emit_pte(m, bb, src_L0_pt, src_is_vram, &src_it, src_L0,
>> +            emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
>>                    src_bo);
>>           else
>>               xe_res_next(&src_it, src_L0);
>>
>>           if (!dst_is_vram)
>> -            emit_pte(m, bb, dst_L0_pt, dst_is_vram, &dst_it, src_L0,
>> +            emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
>>                    dst_bo);
>>           else
>>               xe_res_next(&dst_it, src_L0);
>>
>>           if (copy_system_ccs)
>> -            emit_pte(m, bb, ccs_pt, false, &ccs_it, ccs_size, src_bo);
>> +            emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);
>>
>>           bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
>>           update_idx = bb->len;
>> @@ -975,7 +983,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
>>
>>           /* Preemption is enabled again by the ring ops. */
>>           if (!clear_vram) {
>> -            emit_pte(m, bb, clear_L0_pt, clear_vram, &src_it, clear_L0,
>> +            emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
>>                    bo);
>>           } else {
>>               xe_res_next(&src_it, clear_L0);
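
To summarize the resulting call-site contract (condensed from the
hunks above, not verbatim code): PTEs for data pages that are accessed
indirectly pass true and therefore get the compression-enabled PAT
index on Xe2+, while the PTEs mapping the CCS metadata itself keep a
regular uncached mapping:

     /* data pages: indirect access, compression-enabled PAT on Xe2+ */
     emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0, src_bo);

     /* CCS metadata pages: plain uncached mapping, no compression */
     emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);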

