[PATCH libdrm 5/9] amdgpu: Add amdgpu_bo_handle_type_kms_user
Michel Dänzer
michel@daenzer.net
Mon Jun 24 16:54:02 UTC 2019
From: Michel Dänzer <michel.daenzer@amd.com>
amdgpu_bo_handle_type_kms returns the handle valid for the DRM file
descriptor (fd) used for CS submission etc. This is also valid for the
fd passed to amdgpu_device_initialize the first time for a specific
GPU, but in general not for fds passed to amdgpu_device_initialize
later for the same GPU.
Because some use-cases require a handle valid for the fd passed to
amdgpu_device_initialize, amdgpu_bo_handle_type_kms_user is added for
this purpose.
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
---
amdgpu/amdgpu.h | 14 ++++++++-
amdgpu/amdgpu_bo.c | 65 +++++++++++++++++++++++++++-------------
amdgpu/amdgpu_internal.h | 1 +
3 files changed, 59 insertions(+), 21 deletions(-)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 66e45f73..f95c0a36 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -81,7 +81,14 @@ enum amdgpu_bo_handle_type {
/** GEM flink name (needs DRM authentication, used by DRI2) */
amdgpu_bo_handle_type_gem_flink_name = 0,
- /** KMS handle which is used by all driver ioctls */
+ /** KMS handle which is used by all driver ioctls
+ *
+ * NOTE: The returned handle is valid for the DRM file description
+ * used for command submission, which may be different from the one
+ * referenced by the file descriptor passed to
+ * amdgpu_device_initialize. Use amdgpu_bo_handle_type_kms_user to
+ * get a handle valid for the latter.
+ */
amdgpu_bo_handle_type_kms = 1,
/** DMA-buf fd handle */
@@ -91,6 +98,11 @@ enum amdgpu_bo_handle_type {
* amdgpu_bo_handle_type_kms, use that instead of this
*/
amdgpu_bo_handle_type_kms_noimport = 3,
+
+ /** KMS handle valid for the DRM file description referenced by the
+ * file descriptor passed to amdgpu_device_initialize.
+ */
+ amdgpu_bo_handle_type_kms_user = 4,
};
/** Define known types of GPU VM VA ranges */
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 7fec1f15..8d42db90 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -229,42 +229,58 @@ drm_public int amdgpu_bo_query_info(amdgpu_bo_handle user_bo,
return 0;
}
+static int amdgpu_bo_get_user_handle(struct amdgpu_bo *user_bo,
+ uint32_t *user_handle)
+{
+ struct amdgpu_device *user_dev = user_bo->dev;
+ struct amdgpu_core_device *dev = user_dev->core;
+ struct amdgpu_core_bo *bo = user_bo->core;
+ int dma_fd;
+ int r;
+
+ if (user_dev->user_fd == dev->fd) {
+ *user_handle = bo->handle;
+ return 0;
+ }
+
+ if (user_bo->user_handle)
+ goto out;
+
+ r = drmPrimeHandleToFD(dev->fd, bo->handle, DRM_CLOEXEC, &dma_fd);
+ if (r)
+ return r;
+
+ r = drmPrimeFDToHandle(user_dev->user_fd, dma_fd, &user_bo->user_handle);
+ close(dma_fd);
+ if (r)
+ return r;
+
+out:
+ *user_handle = user_bo->user_handle;
+ return 0;
+}
+
static int amdgpu_bo_export_flink(amdgpu_bo_handle user_bo)
{
struct amdgpu_core_device *dev = user_bo->dev->core;
struct amdgpu_core_bo *bo = user_bo->core;
- int user_fd = user_bo->dev->user_fd;
struct drm_gem_flink flink;
- int fd, dma_fd;
- uint32_t handle;
int r;
- fd = dev->fd;
- handle = bo->handle;
if (bo->flink_name)
return 0;
- if (user_fd != fd) {
- r = drmPrimeHandleToFD(fd, bo->handle, DRM_CLOEXEC, &dma_fd);
- if (!r) {
- r = drmPrimeFDToHandle(user_fd, dma_fd, &handle);
- close(dma_fd);
- }
- if (r)
- return r;
- fd = user_fd;
- }
memset(&flink, 0, sizeof(flink));
- flink.handle = handle;
- r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
+ r = amdgpu_bo_get_user_handle(user_bo, &flink.handle);
if (r)
return r;
- bo->flink_name = flink.name;
+ r = drmIoctl(user_bo->dev->user_fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (r)
+ return r;
- if (user_fd != dev->fd)
- amdgpu_close_kms_handle(user_fd, handle);
+ bo->flink_name = flink.name;
pthread_mutex_lock(&dev->bo_table_mutex);
r = handle_table_insert(&dev->bo_flink_names, bo->flink_name, bo);
@@ -294,6 +310,9 @@ drm_public int amdgpu_bo_export(amdgpu_bo_handle user_bo,
*shared_handle = bo->handle;
return 0;
+ case amdgpu_bo_handle_type_kms_user:
+ return amdgpu_bo_get_user_handle(user_bo, shared_handle);
+
case amdgpu_bo_handle_type_dma_buf_fd:
return drmPrimeHandleToFD(user_bo->dev->core->fd, bo->handle,
DRM_CLOEXEC | DRM_RDWR,
@@ -355,6 +374,7 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle user_dev,
case amdgpu_bo_handle_type_kms:
case amdgpu_bo_handle_type_kms_noimport:
+ case amdgpu_bo_handle_type_kms_user:
/* Importing a KMS handle in not allowed. */
r = -EPERM;
goto unlock;
@@ -404,6 +424,7 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle user_dev,
case amdgpu_bo_handle_type_kms:
case amdgpu_bo_handle_type_kms_noimport:
+ case amdgpu_bo_handle_type_kms_user:
assert(0); /* unreachable */
}
@@ -489,6 +510,10 @@ drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
if (update_references(&user_bo->refcount, NULL)) {
amdgpu_core_bo_free(user_bo);
+ if (user_bo->user_handle) {
+ amdgpu_close_kms_handle(user_bo->dev->user_fd,
+ user_bo->user_handle);
+ }
free(user_bo);
}
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 3a2ab74c..a08a4ae8 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -111,6 +111,7 @@ struct amdgpu_core_bo {
struct amdgpu_bo {
atomic_t refcount;
+ uint32_t user_handle;
struct amdgpu_bo *next;
struct amdgpu_core_bo *core;
struct amdgpu_device *dev;
--
2.20.1
More information about the amd-gfx
mailing list