[PATCH libdrm 2/2] drm/amdgpu: add new low overhead command submission API.

Christian König deathsimple at vodafone.de
Tue Jul 18 13:51:06 UTC 2017


On 18.07.2017 02:48, Dave Airlie wrote:
> From: Dave Airlie <airlied at redhat.com>
>
> This just sends chunks to the kernel API for a single command
> stream.
>
> This should provide a more future-proof and extensible API
> for command submission.
>
> Signed-off-by: Dave Airlie <airlied at redhat.com>
> ---
>   amdgpu/amdgpu.h    | 21 +++++++++++++++++++++
>   amdgpu/amdgpu_cs.c | 30 ++++++++++++++++++++++++++++++
>   2 files changed, 51 insertions(+)
>
> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
> index 183f974..b4a070d 100644
> --- a/amdgpu/amdgpu.h
> +++ b/amdgpu/amdgpu.h
> @@ -1382,6 +1382,27 @@ int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
>   			     int shared_fd,
>   			     uint32_t *syncobj);
>   
> +/**
> + *  Submit raw command submission to kernel
> + *
> + * \param   dev	       - \c [in] device handle
> + * \param   context    - \c [in] context handle for context id
> + * \param   bo_list_handle - \c [in] request bo list handle (0 for none)
> + * \param   num_chunks - \c [in] number of CS chunks to submit
> + * \param   chunks     - \c [in] array of CS chunks
> + * \param   seq_no     - \c [out] output sequence number for submission.
> + *
> + * \return   0 on success\n
> + *          <0 - Negative POSIX Error code
> + *
> +*/
> +struct drm_amdgpu_cs_chunk;
> +int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
> +			 amdgpu_context_handle context,
> +			 uint32_t bo_list_handle,

Why is bo_list_handle a uint32_t here?

Apart from that, it looks good to me,
Christian.

> +			 int num_chunks,
> +			 struct drm_amdgpu_cs_chunk *chunks,
> +			 uint64_t *seq_no);
>   #ifdef __cplusplus
>   }
>   #endif
> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
> index 722fd75..3c32070 100644
> --- a/amdgpu/amdgpu_cs.c
> +++ b/amdgpu/amdgpu_cs.c
> @@ -634,3 +634,33 @@ int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
>   
>   	return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
>   }
> +
> +int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
> +			 amdgpu_context_handle context,
> +			 uint32_t bo_list_handle,
> +			 int num_chunks,
> +			 struct drm_amdgpu_cs_chunk *chunks,
> +			 uint64_t *seq_no)
> +{
> +	union drm_amdgpu_cs cs = {0};
> +	uint64_t *chunk_array;
> +	int i, r;
> +	if (num_chunks == 0)
> +		return -EINVAL;
> +
> +	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
> +	for (i = 0; i < num_chunks; i++)
> +		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
> +	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
> +	cs.in.ctx_id = context->id;
> +	cs.in.bo_list_handle = bo_list_handle;
> +	cs.in.num_chunks = num_chunks;
> +	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
> +				&cs, sizeof(cs));
> +	if (r)
> +		return r;
> +
> +	if (seq_no)
> +		*seq_no = cs.out.handle;
> +	return 0;
> +}
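
For reference, a minimal usage sketch of the new entry point could look like the following (illustrative only, not part of the patch). It assumes an IB has already been written and mapped into the GPU VM at ib_va, and that dev, context and bo_list_handle are set up as usual; the helper name submit_one_ib and its parameters are made up for the example:

	#include <amdgpu.h>
	#include <amdgpu_drm.h>

	/* Hypothetical sketch: submit a single GFX IB through the raw API.
	 * ib_va/ib_size_dw describe an IB the caller has already prepared;
	 * the chunk layout uses the existing kernel UAPI structures from
	 * amdgpu_drm.h. */
	static int submit_one_ib(amdgpu_device_handle dev,
				 amdgpu_context_handle context,
				 uint32_t bo_list_handle,
				 uint64_t ib_va, uint32_t ib_size_dw,
				 uint64_t *seq_no)
	{
		struct drm_amdgpu_cs_chunk_ib ib = {0};
		struct drm_amdgpu_cs_chunk chunk;

		/* Describe the IB itself: GFX ring 0, no flags. */
		ib.ip_type = AMDGPU_HW_IP_GFX;
		ib.va_start = ib_va;
		ib.ib_bytes = ib_size_dw * 4;

		/* Wrap it in a single CS chunk and hand it to the kernel. */
		chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
		chunk.length_dw = sizeof(ib) / 4;
		chunk.chunk_data = (uint64_t)(uintptr_t)&ib;

		return amdgpu_cs_submit_raw(dev, context, bo_list_handle,
					    1, &chunk, seq_no);
	}

Additional chunks (fences, dependencies, ...) would simply be appended to the chunk array, which is what makes the raw interface extensible.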
