[PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
Marek Olšák
maraeo at gmail.com
Thu Sep 28 20:41:03 UTC 2017
Can I get an Rb (Reviewed-by) for this series?
Thanks,
Marek
On Tue, Sep 12, 2017 at 10:42 PM, Marek Olšák <maraeo at gmail.com> wrote:
> From: Marek Olšák <marek.olsak at amd.com>
>
> Add an ioctl for converting an amdgpu fence into a syncobj handle,
> a syncobj FD, or a sync_file FD. Mesa will use this.
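>
> A rough userspace sketch of the intended use (illustrative only, not part
> of the kernel change; the helper name is made up, error handling is
> trimmed, and ctx_id/ring/seq_no are assumed to come from an earlier CS
> submission):
>
> 	#include <stdint.h>
> 	#include <xf86drm.h>	/* drmCommandWriteRead() */
> 	#include <amdgpu_drm.h>	/* via libdrm's include path */
>
> 	/* Convert the fence of a previous CS submission into a sync_file FD. */
> 	static int fence_to_sync_file(int drm_fd, uint32_t ctx_id,
> 				      uint32_t ring, uint64_t seq_no)
> 	{
> 		union drm_amdgpu_fence_to_handle args = {0};
>
> 		args.in.fence.ctx_id = ctx_id;
> 		args.in.fence.ip_type = AMDGPU_HW_IP_GFX;
> 		args.in.fence.ring = ring;
> 		args.in.fence.seq_no = seq_no;
> 		args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
>
> 		if (drmCommandWriteRead(drm_fd, DRM_AMDGPU_FENCE_TO_HANDLE,
> 					&args, sizeof(args)))
> 			return -1;
>
> 		return (int)args.out.handle;	/* sync_file FD */
> 	}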
>
> Signed-off-by: Marek Olšák <marek.olsak at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++
> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 61 +++++++++++++++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 +
> include/uapi/drm/amdgpu_drm.h | 16 +++++++++
> 5 files changed, 82 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index b5c8b90..c15fa93 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
> int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
> + struct drm_file *filp);
> int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
> int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 7cb8a59..6dd719c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -25,6 +25,7 @@
> * Jerome Glisse <glisse at freedesktop.org>
> */
> #include <linux/pagemap.h>
> +#include <linux/sync_file.h>
> #include <drm/drmP.h>
> #include <drm/amdgpu_drm.h>
> #include <drm/drm_syncobj.h>
> @@ -1311,6 +1312,66 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
> return fence;
> }
>
> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
> +				    struct drm_file *filp)
> +{
> +	struct amdgpu_device *adev = dev->dev_private;
> +	struct amdgpu_fpriv *fpriv = filp->driver_priv;
> +	union drm_amdgpu_fence_to_handle *info = data;
> +	struct dma_fence *fence;
> +	struct drm_syncobj *syncobj;
> +	struct sync_file *sync_file;
> +	int fd, r;
> +
> +	if (amdgpu_kms_vram_lost(adev, fpriv))
> +		return -ENODEV;
> +
> +	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
> +	if (IS_ERR(fence))
> +		return PTR_ERR(fence);
> +
> +	switch (info->in.what) {
> +	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
> +		r = drm_syncobj_create(&syncobj, 0, fence);
> +		dma_fence_put(fence);
> +		if (r)
> +			return r;
> +		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
> +		drm_syncobj_put(syncobj);
> +		return r;
> +
> +	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
> +		r = drm_syncobj_create(&syncobj, 0, fence);
> +		dma_fence_put(fence);
> +		if (r)
> +			return r;
> +		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
> +		drm_syncobj_put(syncobj);
> +		return r;
> +
> +	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
> +		fd = get_unused_fd_flags(O_CLOEXEC);
> +		if (fd < 0) {
> +			dma_fence_put(fence);
> +			return fd;
> +		}
> +
> +		sync_file = sync_file_create(fence);
> +		dma_fence_put(fence);
> +		if (!sync_file) {
> +			put_unused_fd(fd);
> +			return -ENOMEM;
> +		}
> +
> +		fd_install(fd, sync_file->file);
> +		info->out.handle = fd;
> +		return 0;
> +
> +	default:
> +		return -EINVAL;
> +	}
> +}
> +
> /**
> * amdgpu_cs_wait_all_fence - wait on all fences to signal
> *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index d01aca6..1e38411 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -70,9 +70,10 @@
> * - 3.18.0 - Export gpu always on cu bitmap
> * - 3.19.0 - Add support for UVD MJPEG decode
> * - 3.20.0 - Add support for local BOs
> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
> */
> #define KMS_DRIVER_MAJOR 3
> -#define KMS_DRIVER_MINOR 20
> +#define KMS_DRIVER_MINOR 21
> #define KMS_DRIVER_PATCHLEVEL 0
>
> int amdgpu_vram_limit = 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index d31777b..b09d315 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
> DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> + DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> /* KMS */
> DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 9f5bd97..ec57cd0 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -53,6 +53,7 @@ extern "C" {
> #define DRM_AMDGPU_WAIT_FENCES 0x12
> #define DRM_AMDGPU_VM 0x13
> #define DRM_AMDGPU_FREESYNC 0x14
> +#define DRM_AMDGPU_FENCE_TO_HANDLE 0x15
>
> #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
> #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
> @@ -69,6 +70,7 @@ extern "C" {
> #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
> #define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
> #define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
>
> #define AMDGPU_GEM_DOMAIN_CPU 0x1
> #define AMDGPU_GEM_DOMAIN_GTT 0x2
> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
> __u32 handle;
> };
>
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
> +
> +union drm_amdgpu_fence_to_handle {
> +	struct {
> +		struct drm_amdgpu_fence fence;
> +		__u32 what;
> +	} in;
> +	struct {
> +		__u32 handle;
> +	} out;
> +};
> +
> struct drm_amdgpu_cs_chunk_data {
> union {
> struct drm_amdgpu_cs_chunk_ib ib_data;
> --
> 2.7.4
>
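For completeness, a rough sketch (not part of the patch, untested) of how the
returned handles could be consumed: a sync_file FD can be waited on with plain
poll(2), and a syncobj FD can be imported into another DRM file descriptor with
libdrm's drmSyncobjFDToHandle(). The helper names below are made up for
illustration:

	#include <poll.h>
	#include <stdint.h>
	#include <xf86drm.h>

	/* Wait for a sync_file FD (AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD)
	 * to signal.  Returns 1 when signaled, 0 on timeout, -1 on error. */
	static int wait_sync_file(int sync_file_fd, int timeout_ms)
	{
		struct pollfd p = { .fd = sync_file_fd, .events = POLLIN };

		return poll(&p, 1, timeout_ms);
	}

	/* Import a syncobj FD (AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD) into
	 * another DRM fd, e.g. in a different process. */
	static int import_syncobj_fd(int drm_fd, int syncobj_fd, uint32_t *handle)
	{
		return drmSyncobjFDToHandle(drm_fd, syncobj_fd, handle);
	}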