[PATCH RFC i-g-t 1/2] lib/xe/xe_util: helper function
Zeng, Oak
oak.zeng at intel.com
Wed May 1 03:55:28 UTC 2024
> -----Original Message-----
> From: Bommu, Krishnaiah <krishnaiah.bommu at intel.com>
> Sent: Tuesday, April 30, 2024 2:28 PM
> To: igt-dev at lists.freedesktop.org
> Cc: Bommu, Krishnaiah <krishnaiah.bommu at intel.com>; Zeng, Oak
> <oak.zeng at intel.com>; Ghimiray, Himal Prasad
> <himal.prasad.ghimiray at intel.com>
> Subject: [PATCH RFC i-g-t 1/2] lib/xe/xe_util: helper function
Please improve the subject line as well; "helper function" doesn't say what these helpers actually do.
>
> Introduce helper functions for object creation, binding, submission, and
> destruction, usable by SVM and other tests.
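For other reviewers' context, here is roughly how I picture these helpers composing in a test. Only a sketch: fd, vm and eci are whatever the test already set up, the two gpu_addr values are arbitrary example VAs, and vram_if_possible() is just one possible placement:

    struct xe_buffer dst = {
        .fd = fd, .vm = vm, .size = 4096,
        .placement = vram_if_possible(fd, 0),
        .gpu_addr = (void *)0x1a0000,
    };
    struct xe_buffer cmd = {
        .fd = fd, .vm = vm,
        .gpu_addr = (void *)0x1b0000,
    };

    xe_create_buffer(&dst);
    /* fill the batch: store 0xc0ffee to dst, then MI_BATCH_BUFFER_END */
    xe_create_cmdbuf(&cmd, insert_store, (uint64_t)dst.gpu_addr, 0xc0ffee, eci);
    xe_submit_cmd(&cmd);
    /* the exec ufence has signaled, so the store must be CPU-visible now */
    igt_assert_eq(*(uint32_t *)dst.cpu_addr, 0xc0ffee);
    xe_destroy_cmdbuf(&cmd);
    xe_destroy_buffer(&dst);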
>
> Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu at intel.com>
> Cc: Oak Zeng <oak.zeng at intel.com>
> Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
> lib/xe/xe_util.c | 113 +++++++++++++++++++++++++++++++++++++++++++++++
> lib/xe/xe_util.h | 32 ++++++++++++++
> 2 files changed, 145 insertions(+)
>
> diff --git a/lib/xe/xe_util.c b/lib/xe/xe_util.c
> index 050162b5e..3f1e8c052 100644
> --- a/lib/xe/xe_util.c
> +++ b/lib/xe/xe_util.c
> @@ -10,6 +10,119 @@
> #include "xe/xe_ioctl.h"
> #include "xe/xe_query.h"
> #include "xe/xe_util.h"
> +#include "lib/svga/vm_basic_types.h"
> +
> +/* Submit a command,
> + * wait for the command to complete on the GPU,
> + * and verify the ufence value. */
> +void xe_submit_cmd(struct xe_buffer *cmdbuf)
> +{
> + struct drm_xe_sync sync[1] = {
> + { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> + .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> + .timeline_value = USER_FENCE_VALUE,
> + .addr = xe_cmdbuf_exec_ufence_gpuva(cmdbuf),},
> + };
> + struct drm_xe_exec exec = {
> + .num_batch_buffer = 1,
> + .num_syncs = 1,
> + .syncs = to_user_pointer(&sync),
> + .exec_queue_id = cmdbuf->exec_queue,
> + .address = (uint64_t)cmdbuf->gpu_addr,
> + };
> +
> + xe_exec(cmdbuf->fd, &exec);
> + xe_wait_ufence(cmdbuf->fd, xe_cmdbuf_exec_ufence_cpuva(cmdbuf), USER_FENCE_VALUE,
> + cmdbuf->exec_queue, ONE_SEC);
> +}
> +
> +/* Create a buffer and map it to CPU and GPU. */
> +void xe_create_buffer(struct xe_buffer *buffer)
> +{
> +#define ONE_SEC MS_TO_NS(1000)
You also use the ONE_SEC definition in the xe_submit_cmd function, so move it out of this function.
Oh, you defined it in the header file... so just delete it here.
> + struct drm_xe_sync sync[1] = {
I personally prefer writing plain "struct drm_xe_sync sync", but I am fine if you keep sync[1]... not a big deal.
> + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> + .timeline_value = USER_FENCE_VALUE },
> + };
> +
> + buffer->bind_queue = xe_bind_exec_queue_create(buffer->fd, buffer->vm, 0);
> + buffer->bind_ufence = aligned_alloc(xe_get_default_alignment(buffer->fd), buffer->size);
Here we allocate a ufence, so don't use buffer->size: buffer->size is the size of the buffer we want to create, not the size of the ufence. I think 8 bytes with alignment is enough for a ufence?
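Something like this sketch, say. Note that C11 aligned_alloc() wants the size to be a multiple of the alignment, so rounding the 8 bytes up to one aligned chunk is the simplest safe choice:

    size_t align = xe_get_default_alignment(buffer->fd);

    /* 8 bytes hold the fence value; allocate one aligned chunk so the
     * aligned_alloc() size/alignment contract is satisfied */
    buffer->bind_ufence = aligned_alloc(align, align);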
> + sync->addr = buffer->bind_ufence;
Write sync.addr if you write plain sync above.
> +
> + if (!buffer->is_userptr) {
> + buffer->bo = xe_bo_create(buffer->fd, 0, buffer->size, buffer->placement, buffer->flag);
> + buffer->cpu_addr = xe_bo_map(buffer->fd, buffer->bo, buffer->size);
> + } else {
> + buffer->bo = aligned_alloc(xe_get_default_alignment(buffer->fd), buffer->size);
> + }
> +
> + xe_vm_bind_async(buffer->fd, buffer->vm, buffer->bind_queue, buffer->bo, 0, (uint64_t)buffer->gpu_addr, buffer->size, sync, 1);
Pass &sync here if you write plain sync above...
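Putting these three nits together, the single-sync version would read something like this (just a sketch):

    struct drm_xe_sync sync = {
        .type = DRM_XE_SYNC_TYPE_USER_FENCE,
        .flags = DRM_XE_SYNC_FLAG_SIGNAL,
        .timeline_value = USER_FENCE_VALUE,
    };

    sync.addr = buffer->bind_ufence;
    xe_vm_bind_async(buffer->fd, buffer->vm, buffer->bind_queue, buffer->bo,
                     0, (uint64_t)buffer->gpu_addr, buffer->size, &sync, 1);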
> +
> + xe_wait_ufence(buffer->fd, buffer->bind_ufence, USER_FENCE_VALUE, buffer->bind_queue, ONE_SEC);
> +}
> +
> +void xe_destroy_buffer(struct xe_buffer *buffer)
> +{
> + struct drm_xe_sync sync[1] = {
> + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> + .timeline_value = USER_FENCE_VALUE },
> + };
> + sync->addr = buffer->bind_ufence;
> +
> + xe_vm_unbind_async(buffer->fd, buffer->vm, buffer->bind_queue, 0, (uint64_t)buffer->gpu_addr, buffer->size, sync, 1);
> + xe_wait_ufence(buffer->fd, buffer->bind_ufence, USER_FENCE_VALUE, buffer->bind_queue, ONE_SEC);
> +
> + munmap(buffer->cpu_addr, buffer->size);
This munmap() is only valid for the !is_userptr case; move it into the branch below.
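I.e., keeping the patch's own calls and just moving the unmap, something like:

    if (!buffer->is_userptr) {
        munmap(buffer->cpu_addr, buffer->size);
        gem_close(buffer->fd, buffer->bo);
    } else {
        free(buffer->bo);
    }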
> + if (!buffer->is_userptr)
> + gem_close(buffer->fd, buffer->bo);
> + else
> + free(buffer->bo);
> +
> + free(buffer->bind_ufence);
> + xe_exec_queue_destroy(buffer->fd, buffer->bind_queue);
> +}
> +
> +/*
> + * A command buffer is a buffer in GT0's vram, filled with GPU commands,
> + * plus some memory for a ufence used to sync command submission.
> + */
> +void insert_store(uint32_t *batch, uint64_t dst_va, uint32_t val)
> +{
> + int i = 0;
> +
> + batch[i] = MI_STORE_DWORD_IMM_GEN4;
> + batch[++i] = dst_va;
> + batch[++i] = dst_va >> 32;
> + batch[++i] = val;
> + batch[++i] = MI_BATCH_BUFFER_END;
> +}
> +
> +void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func,
> + uint64_t dst_va, uint32_t val, struct drm_xe_engine_class_instance *eci)
> +{
> + //make some room for an exec_ufence, which will be used to sync the
> + //submission of this command....
> +
> + cmd_buf->size = xe_bb_size(cmd_buf->fd, cmd_buf->size + PAGE_ALIGN_UFENCE);
> + xe_create_buffer(cmd_buf);
> + cmd_buf->exec_queue = xe_exec_queue_create(cmd_buf->fd, cmd_buf->vm, eci, 0);
> + fill_func(cmd_buf->cpu_addr, dst_va, val);
> +}
> +
> +void xe_destroy_cmdbuf(struct xe_buffer *cmd_buf)
> +{
> + xe_exec_queue_destroy(cmd_buf->fd, cmd_buf->exec_queue);
> + xe_destroy_buffer(cmd_buf);
> +}
> +
> +uint64_t xe_cmdbuf_exec_ufence_gpuva(struct xe_buffer *cmd_buf)
> +{
Write a simple comment here to say that the last 8 bytes of the cmd buffer are used as the ufence...
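E.g., something as simple as:

    /* the exec ufence lives in the last 8 bytes of the command buffer */
    return (uint64_t)cmd_buf->gpu_addr + cmd_buf->size - 8;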
Oak
> + return (uint64_t)cmd_buf->gpu_addr + cmd_buf->size - 8;
> +}
> +
> +uint64_t *xe_cmdbuf_exec_ufence_cpuva(struct xe_buffer *cmd_buf)
> +{
> + return cmd_buf->cpu_addr + cmd_buf->size - 8;
> +}
>
> static bool __region_belongs_to_regions_type(struct drm_xe_mem_region *region,
> uint32_t *mem_regions_type,
> diff --git a/lib/xe/xe_util.h b/lib/xe/xe_util.h
> index 6480ea01a..35f59cb6b 100644
> --- a/lib/xe/xe_util.h
> +++ b/lib/xe/xe_util.h
> @@ -12,6 +12,38 @@
> #include <stdint.h>
> #include <xe_drm.h>
>
> +#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> +#define ONE_SEC MS_TO_NS(1000)
> +#define PAGE_ALIGN_UFENCE 4096
> +
> +struct xe_buffer {
> + void *cpu_addr;
> + void *gpu_addr;
> + /*the user fence used to vm bind this buffer*/
> + uint64_t bind_ufence;
> + uint64_t size;
> + uint32_t flag;
> + uint32_t vm;
> + uint32_t bo;
> + uint32_t placement;
> + uint32_t bind_queue;
> + /*only a cmd buffer has an exec queue*/
> + uint32_t exec_queue;
> + int fd;
> + bool is_userptr;
> +};
> +
> +typedef void (*cmdbuf_fill_func_t) (uint32_t *batch, uint64_t dst_gpu_va, uint32_t val);
> +void xe_create_buffer(struct xe_buffer *buffer);
> +void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func,
> + uint64_t dst_va, uint32_t val, struct drm_xe_engine_class_instance *eci);
> +uint64_t xe_cmdbuf_exec_ufence_gpuva(struct xe_buffer *cmd_buf);
> +uint64_t *xe_cmdbuf_exec_ufence_cpuva(struct xe_buffer *cmd_buf);
> +void insert_store(uint32_t *batch, uint64_t dst_va, uint32_t val);
> +void xe_submit_cmd(struct xe_buffer *cmdbuf);
> +void xe_destroy_buffer(struct xe_buffer *buffer);
> +void xe_destroy_cmdbuf(struct xe_buffer *cmd_buf);
> +
> #define XE_IS_SYSMEM_MEMORY_REGION(fd, region) \
> (xe_region_class(fd, region) == DRM_XE_MEM_REGION_CLASS_SYSMEM)
> #define XE_IS_VRAM_MEMORY_REGION(fd, region) \
> --
> 2.25.1