[PATCH libdrm] amdgpu: add a function to create amdgpu bo internally (v3)
Christian König
christian.koenig at amd.com
Mon Aug 13 06:35:01 UTC 2018
Am 13.08.2018 um 07:18 schrieb Junwei Zhang:
> a helper function to create and initialize amdgpu bo
>
> v2: update error handling: add label and free bo
> v3: update error handling: separate each error label
>
> Signed-off-by: Junwei Zhang <Jerry.Zhang at amd.com>
> ---
> amdgpu/amdgpu_bo.c | 196 ++++++++++++++++++++++++++---------------------------
> 1 file changed, 95 insertions(+), 101 deletions(-)
>
> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
> index b790e9b..6f0baf1 100644
> --- a/amdgpu/amdgpu_bo.c
> +++ b/amdgpu/amdgpu_bo.c
> @@ -48,11 +48,31 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
> drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
> }
>
> +static int amdgpu_bo_create(amdgpu_device_handle dev,
> + uint64_t size,
> + uint32_t handle,
> + amdgpu_bo_handle *buf_handle)
> +{
> + struct amdgpu_bo *bo;
> +
> + bo = calloc(1, sizeof(struct amdgpu_bo));
> + if (!bo)
> + return -ENOMEM;
> +
> + atomic_set(&bo->refcount, 1);
> + bo->dev = dev;
> + bo->alloc_size = size;
> + bo->handle = handle;
> + pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> +
> + *buf_handle = bo;
> + return 0;
> +}
> +
> int amdgpu_bo_alloc(amdgpu_device_handle dev,
> struct amdgpu_bo_alloc_request *alloc_buffer,
> amdgpu_bo_handle *buf_handle)
> {
> - struct amdgpu_bo *bo;
> union drm_amdgpu_gem_create args;
> unsigned heap = alloc_buffer->preferred_heap;
> int r = 0;
> @@ -61,14 +81,6 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
> if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
> return -EINVAL;
>
> - bo = calloc(1, sizeof(struct amdgpu_bo));
> - if (!bo)
> - return -ENOMEM;
> -
> - atomic_set(&bo->refcount, 1);
> - bo->dev = dev;
> - bo->alloc_size = alloc_buffer->alloc_size;
> -
> memset(&args, 0, sizeof(args));
> args.in.bo_size = alloc_buffer->alloc_size;
> args.in.alignment = alloc_buffer->phys_alignment;
> @@ -80,24 +92,23 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
> /* Allocate the buffer with the preferred heap. */
> r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
> &args, sizeof(args));
> + if (r)
> + goto out;
> +
> + r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
> + buf_handle);
> if (r) {
> - free(bo);
> - return r;
> + amdgpu_close_kms_handle(dev, args.out.handle);
> + goto out;
> }
>
> - bo->handle = args.out.handle;
> -
> - pthread_mutex_lock(&bo->dev->bo_table_mutex);
> - r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
> - pthread_mutex_unlock(&bo->dev->bo_table_mutex);
> -
> - pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> -
> + pthread_mutex_lock(&dev->bo_table_mutex);
> + r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
> + *buf_handle);
> + pthread_mutex_unlock(&dev->bo_table_mutex);
> if (r)
> - amdgpu_bo_free(bo);
> - else
> - *buf_handle = bo;
> -
> + amdgpu_bo_free(*buf_handle);
> +out:
> return r;
> }
>
> @@ -256,7 +267,9 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
> {
> struct drm_gem_open open_arg = {};
> struct amdgpu_bo *bo = NULL;
> - int r;
> + uint32_t handle = 0, flink_name = 0;
> + uint64_t alloc_size = 0;
> + int r = 0;
> int dma_fd;
> uint64_t dma_buf_size = 0;
>
> @@ -266,22 +279,18 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
>
> /* Convert a DMA buf handle to a KMS handle now. */
> if (type == amdgpu_bo_handle_type_dma_buf_fd) {
> - uint32_t handle;
> off_t size;
>
> /* Get a KMS handle. */
> r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
> - if (r) {
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - return r;
> - }
> + if (r)
> + goto unlock;
>
> /* Query the buffer size. */
> size = lseek(shared_handle, 0, SEEK_END);
> if (size == (off_t)-1) {
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - amdgpu_close_kms_handle(dev, handle);
> - return -errno;
> + r = -errno;
> + goto close_handle;
> }
> lseek(shared_handle, 0, SEEK_SET);
>
> @@ -302,12 +311,12 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
> case amdgpu_bo_handle_type_kms:
> case amdgpu_bo_handle_type_kms_noimport:
> /* Importing a KMS handle in not allowed. */
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - return -EPERM;
> + r = -EPERM;
> + goto unlock;
>
> default:
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - return -EINVAL;
> + r = -EINVAL;
> + goto unlock;
> }
>
> if (bo) {
> @@ -320,58 +329,32 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
> return 0;
> }
>
> - bo = calloc(1, sizeof(struct amdgpu_bo));
> - if (!bo) {
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - if (type == amdgpu_bo_handle_type_dma_buf_fd) {
> - amdgpu_close_kms_handle(dev, shared_handle);
> - }
> - return -ENOMEM;
> - }
> -
> /* Open the handle. */
> switch (type) {
> case amdgpu_bo_handle_type_gem_flink_name:
> open_arg.name = shared_handle;
> r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
> - if (r) {
> - free(bo);
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - return r;
> - }
> + if (r)
> + goto unlock;
>
> - bo->handle = open_arg.handle;
> + flink_name = shared_handle;
> + handle = open_arg.handle;
> + alloc_size = open_arg.size;
> if (dev->flink_fd != dev->fd) {
> - r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
> - if (r) {
> - free(bo);
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - return r;
> - }
> - r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
> -
> + r = drmPrimeHandleToFD(dev->flink_fd, handle,
> + DRM_CLOEXEC, &dma_fd);
> + if (r)
> + goto close_handle;
> + r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
> close(dma_fd);
> -
> - if (r) {
> - free(bo);
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - return r;
> - }
> - }
> - bo->flink_name = shared_handle;
> - bo->alloc_size = open_arg.size;
> - r = handle_table_insert(&dev->bo_flink_names, shared_handle,
> - bo);
> - if (r) {
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> - amdgpu_bo_free(bo);
> - return r;
> + if (r)
> + goto close_handle;
> }
> break;
>
> case amdgpu_bo_handle_type_dma_buf_fd:
> - bo->handle = shared_handle;
> - bo->alloc_size = dma_buf_size;
> + handle = shared_handle;
> + alloc_size = dma_buf_size;
> break;
>
> case amdgpu_bo_handle_type_kms:
> @@ -380,16 +363,35 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
> }
>
> /* Initialize it. */
> - atomic_set(&bo->refcount, 1);
> - bo->dev = dev;
> - pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> + r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
> + if (r)
> + goto close_handle;
>
> - handle_table_insert(&dev->bo_handles, bo->handle, bo);
> - pthread_mutex_unlock(&dev->bo_table_mutex);
> + r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
> + if (r)
> + goto bo_free;
> + if (flink_name) {
> + bo->flink_name = flink_name;
> + r = handle_table_insert(&dev->bo_flink_names, flink_name,
> + bo);
> + if (r)
> + goto remove_handle;
> + }
>
> output->buf_handle = bo;
> output->alloc_size = bo->alloc_size;
> + pthread_mutex_unlock(&dev->bo_table_mutex);
> return 0;
> +
> +remove_handle:
> + handle_table_remove(&dev->bo_handles, bo->handle);
> +bo_free:
> + amdgpu_bo_free(bo);
> +close_handle:
> + amdgpu_close_kms_handle(dev, handle);
That is still not 100% clean here. When the handle is already held by
the BO, you end up trying to close it twice.
You need something like this:
if (bo)
amdgpu_bo_free(bo)
else
amdgpu_close_kms_handle(dev, handle);
With that fixed the patch is Reviewed-by: Christian König
<christian.koenig at amd.com>
Regards,
Christian.
> +unlock:
> + pthread_mutex_unlock(&dev->bo_table_mutex);
> + return r;
> }
>
> int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
> @@ -574,7 +576,6 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
> amdgpu_bo_handle *buf_handle)
> {
> int r;
> - struct amdgpu_bo *bo;
> struct drm_amdgpu_gem_userptr args;
>
> args.addr = (uintptr_t)cpu;
> @@ -584,28 +585,21 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
> r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
> &args, sizeof(args));
> if (r)
> - return r;
> -
> - bo = calloc(1, sizeof(struct amdgpu_bo));
> - if (!bo)
> - return -ENOMEM;
> + goto out;
>
> - atomic_set(&bo->refcount, 1);
> - bo->dev = dev;
> - bo->alloc_size = size;
> - bo->handle = args.handle;
> -
> - pthread_mutex_lock(&bo->dev->bo_table_mutex);
> - r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
> - pthread_mutex_unlock(&bo->dev->bo_table_mutex);
> -
> - pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> + r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
> + if (r) {
> + amdgpu_close_kms_handle(dev, args.handle);
> + goto out;
> + }
>
> + pthread_mutex_lock(&dev->bo_table_mutex);
> + r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
> + *buf_handle);
> + pthread_mutex_unlock(&dev->bo_table_mutex);
> if (r)
> - amdgpu_bo_free(bo);
> - else
> - *buf_handle = bo;
> -
> + amdgpu_bo_free(*buf_handle);
> +out:
> return r;
> }
>
More information about the amd-gfx
mailing list