[PATCH libdrm] amdgpu: add amdgpu_find_bo_by_cpu_mapping interface (v2)
Junwei Zhang
Jerry.Zhang at amd.com
Mon Jul 30 10:04:38 UTC 2018
From: Chunming Zhou <David1.Zhou at amd.com>
(amdgpu_drm.h will be synced from kernel header as general approach, here provided for review only)
userspace needs to know if the user memory is from BO or malloc.
v2: modify IOCTL data as union(in, out)
update bo table management(get, set)
simplify code logic
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
Signed-off-by: Junwei Zhang <Jerry.Zhang at amd.com> (v2)
Reviewed-by: Jammy Zhou <Jammy.Zhou at amd.com>
Reviewed-by: Christian König <christian.koenig at amd.com>
---
amdgpu/amdgpu.h | 24 ++++++++++++++++++++++++
amdgpu/amdgpu_bo.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++
include/drm/amdgpu_drm.h | 21 +++++++++++++++++++++
3 files changed, 94 insertions(+)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index be83b45..e7a6642 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -678,6 +678,30 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
amdgpu_bo_handle *buf_handle);
/**
+ * Validate if the user memory comes from BO
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param cpu - [in] CPU address of user allocated memory which we
+ * want to map to GPU address space (make GPU accessible)
+ * (This address must be correctly aligned).
+ * \param size - [in] Size of allocation (must be correctly aligned)
+ * \param buf_handle - [out] Buffer handle for the userptr memory
+ * If the user memory is not backed by a BO, buf_handle is set to NULL.
+ * \param offset_in_bo - [out] offset in this BO for this user memory
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+ void *cpu,
+ uint64_t size,
+ amdgpu_bo_handle *buf_handle,
+ uint64_t *offset_in_bo);
+
+
+/**
* Free previosuly allocated memory
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index d29be24..6c4b8f5 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -534,6 +534,55 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
}
}
+/**
+ * Find the BO (if any) backing a range of user CPU memory.
+ *
+ * On success *buf_handle holds a new reference to the BO (either the
+ * existing tracked BO or a freshly created wrapper for the kernel
+ * handle) and *offset_in_bo is the byte offset of \p cpu inside it.
+ * The caller owns the returned reference and must release it with
+ * amdgpu_bo_free().
+ *
+ * Returns 0 on success, negative POSIX error code otherwise.
+ */
+int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+				  void *cpu,
+				  uint64_t size,
+				  amdgpu_bo_handle *buf_handle,
+				  uint64_t *offset_in_bo)
+{
+	struct amdgpu_bo *bo;
+	union drm_amdgpu_gem_find_bo args;
+	int r;
+
+	/* Zero the whole union so the kernel never sees stale stack
+	 * bytes in padding or in out-only fields. */
+	memset(&args, 0, sizeof(args));
+	args.in.addr = (uintptr_t)cpu;
+	args.in.size = size;
+
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_FIND_BO,
+				&args, sizeof(args));
+	if (r)
+		return r;
+	if (args.out.handle == 0)
+		return -EINVAL;
+
+	pthread_mutex_lock(&dev->bo_table_mutex);
+	bo = util_hash_table_get(dev->bo_handles,
+				 (void*)(uintptr_t)args.out.handle);
+	if (bo) {
+		/* Take the reference while still holding the table lock;
+		 * otherwise another thread could drop the last reference
+		 * and free the BO between unlock and atomic_inc. */
+		atomic_inc(&bo->refcount);
+		pthread_mutex_unlock(&dev->bo_table_mutex);
+		goto out;
+	}
+
+	bo = calloc(1, sizeof(struct amdgpu_bo));
+	if (!bo) {
+		pthread_mutex_unlock(&dev->bo_table_mutex);
+		return -ENOMEM;
+	}
+
+	atomic_set(&bo->refcount, 1);
+	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+	bo->dev = dev;
+	/* NOTE(review): this records the size of the queried user range,
+	 * not the size of the underlying BO — confirm no caller treats
+	 * alloc_size as the full BO size. */
+	bo->alloc_size = size;
+	bo->handle = args.out.handle;
+	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
+	pthread_mutex_unlock(&dev->bo_table_mutex);
+
+out:
+	*buf_handle = bo;
+	*offset_in_bo = args.out.offset;
+	return 0;
+}
+
+
+
+
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index c363b67..6305cb6 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -54,6 +54,7 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
+#define DRM_AMDGPU_GEM_FIND_BO 0x16
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -71,6 +72,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+#define DRM_IOCTL_AMDGPU_GEM_FIND_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO, union drm_amdgpu_gem_find_bo)
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -356,6 +358,25 @@ union drm_amdgpu_gem_wait_idle {
struct drm_amdgpu_gem_wait_idle_out out;
};
+struct drm_amdgpu_gem_find_bo_in {
+	/* CPU address of the user memory to look up */
+	__u64 addr;
+	/* size in bytes of the range starting at addr */
+	__u64 size;
+};
+
+struct drm_amdgpu_gem_find_bo_out {
+	/* byte offset of the queried address inside the BO */
+	__u64 offset;
+	/* resulting GEM handle */
+	__u32 handle;
+	/* explicit padding: uAPI structs must contain no implicit holes
+	 * and be sized to a multiple of 8 so the layout is identical on
+	 * 32-bit and 64-bit userspace */
+	__u32 _pad;
+};
+
+union drm_amdgpu_gem_find_bo {
+	struct drm_amdgpu_gem_find_bo_in in;
+	struct drm_amdgpu_gem_find_bo_out out;
+};
+
+
struct drm_amdgpu_wait_cs_in {
/* Command submission handle
* handle equals 0 means none to wait for
--
1.9.1
More information about the amd-gfx
mailing list