[PATCH 1/2] drm/virtio: Implement RESOURCE_QUERY_LAYOUT ioctl

Julia Zhang <julia.zhang@amd.com>
Fri Nov 10 07:16:31 UTC 2023


From: Daniel Stone <daniels at collabora.com>

This ioctl lets the guest discover how the host has actually allocated
the underlying buffer (per-plane offsets, sizes and strides, plus the
format modifier), so that buffers can be used for GL<->Vulkan interop
and passed through standard window systems. It is also a step towards
properly supporting format modifiers in the guest.
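
For illustration, here is a minimal userspace sketch (not part of this
patch) of how a client might probe for the feature and query a buffer's
layout through the new uapi. It assumes an open virtio-gpu render node
fd, a GEM handle the caller already owns, libdrm's drmIoctl(), and that
the updated virtgpu_drm.h is on the include path; the query_layout()
helper name is purely illustrative.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include "virtgpu_drm.h"

/* Print how the host laid out the buffer behind 'bo_handle'.
 * Returns 0 on success or a negative errno on failure. */
static int query_layout(int fd, uint32_t bo_handle)
{
	struct drm_virtgpu_resource_query_layout args;
	int supported = 0;
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_RESOURCE_QUERY_LAYOUT,
		.value = (uint64_t)(uintptr_t)&supported,
	};
	uint32_t i;

	/* The host must advertise VIRTIO_GPU_F_RESOURCE_QUERY_LAYOUT. */
	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) || !supported)
		return -ENOTSUP;

	memset(&args, 0, sizeof(args));
	args.handle = bo_handle;
	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_QUERY_LAYOUT, &args))
		return -errno;

	printf("modifier 0x%llx, %u plane(s)\n",
	       (unsigned long long)args.modifier, args.num_planes);
	for (i = 0; i < args.num_planes; i++)
		printf("  plane %u: offset %llu size %llu stride %u\n",
		       i, (unsigned long long)args.planes[i].offset,
		       (unsigned long long)args.planes[i].size,
		       args.planes[i].stride);
	return 0;
}

The kernel zero-fills the planes[] entries past num_planes, so
num_planes is authoritative for how many entries carry real data.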
---
 drivers/gpu/drm/virtio/virtgpu_drv.c   |  1 +
 drivers/gpu/drm/virtio/virtgpu_drv.h   | 16 +++++-
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 71 ++++++++++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_kms.c   |  8 ++-
 drivers/gpu/drm/virtio/virtgpu_vq.c    | 56 ++++++++++++++++++++
 include/uapi/drm/virtgpu_drm.h         | 19 +++++++
 include/uapi/linux/virtio_gpu.h        | 30 +++++++++++
 7 files changed, 198 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 4f7140e27614..1ee09974d4b7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -190,6 +190,7 @@ static unsigned int features[] = {
 	VIRTIO_GPU_F_RESOURCE_BLOB,
 	VIRTIO_GPU_F_CONTEXT_INIT,
 	VIRTIO_GPU_F_CONTEXT_FENCE_WAIT,
+	VIRTIO_GPU_F_RESOURCE_QUERY_LAYOUT,
 };
 static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7ef4b3df0ada..d6fc0d4ecb7d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -93,6 +93,15 @@ struct virtio_gpu_object {
 	bool host3d_blob, guest_blob;
 	uint32_t blob_mem, blob_flags;
 
+	atomic_t layout_state;
+	struct {
+		uint64_t offset;
+		uint64_t size;
+		uint32_t stride;
+	} planes[VIRTIO_GPU_RES_MAX_PLANES];
+	uint64_t modifier;
+	uint32_t num_planes;
+
 	int uuid_state;
 	uuid_t uuid;
 
@@ -249,6 +258,7 @@ struct virtio_gpu_device {
 	bool has_host_visible;
 	bool has_context_init;
 	bool has_host_fence_wait;
+	bool has_resource_query_layout;
 	struct virtio_shm_region host_visible_region;
 	struct drm_mm host_visible_mm;
 
@@ -281,7 +291,7 @@ struct virtio_gpu_fpriv {
 };
 
 /* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 12
+#define DRM_VIRTIO_NUM_IOCTLS 13
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
 
@@ -436,6 +446,10 @@ int virtio_gpu_cmd_status_freezing(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_host_wait(struct virtio_gpu_device *vgdev,
 			      uint32_t ctx_id, uint64_t fence_id);
 
+int
+virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev,
+				   struct virtio_gpu_object *bo);
+
 /* virtgpu_display.c */
 int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b6079d2bff69..51d04460d0d8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -107,6 +107,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
 		value = vgdev->capset_id_mask;
 		break;
+	case VIRTGPU_PARAM_RESOURCE_QUERY_LAYOUT:
+		value = vgdev->has_resource_query_layout ? 1 : 0;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -676,6 +679,70 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
 	return ret;
 }
 
+static int virtio_gpu_resource_query_layout_ioctl(struct drm_device *dev,
+						  void *data,
+						  struct drm_file *file)
+{
+	struct drm_virtgpu_resource_query_layout *args = data;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct virtio_gpu_object *bo;
+	int layout_state;
+	int ret = 0;
+	int i;
+
+	if (!vgdev->has_resource_query_layout) {
+		DRM_ERROR("RESOURCE_QUERY_LAYOUT is not supported by the host\n");
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(file, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("invalid handle 0x%x\n", args->handle);
+		return -ENOENT;
+	}
+	bo = gem_to_virtio_gpu_obj(obj);
+
+	layout_state = atomic_read(&bo->layout_state);
+	if (layout_state == STATE_ERR) {
+		ret = -EINVAL;
+		goto out;
+	} else if (layout_state == STATE_OK) {
+		goto valid;
+	}
+
+	ret = virtio_gpu_cmd_get_resource_layout(vgdev, bo);
+	if (ret)
+		goto out;
+
+	if (!wait_event_timeout(vgdev->resp_wq,
+				atomic_read(&bo->layout_state) == STATE_OK, 5 * HZ)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+valid:
+	smp_rmb();
+	WARN_ON(atomic_read(&bo->layout_state) != STATE_OK);
+	args->num_planes = min_t(uint32_t, bo->num_planes, VIRTIO_GPU_MAX_RESOURCE_PLANES);
+	args->modifier = bo->modifier;
+	for (i = 0; i < args->num_planes; i++) {
+		args->planes[i].offset = bo->planes[i].offset;
+		args->planes[i].size = bo->planes[i].size;
+		args->planes[i].stride = bo->planes[i].stride;
+	}
+	for (; i < VIRTIO_GPU_MAX_RESOURCE_PLANES; i++) {
+		args->planes[i].offset = 0;
+		args->planes[i].size = 0;
+		args->planes[i].stride = 0;
+	}
+	ret = 0;
+
+out:
+	drm_gem_object_put(obj);
+	return ret;
+}
+
 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
 			  DRM_RENDER_ALLOW),
@@ -715,4 +782,8 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
 			  DRM_RENDER_ALLOW),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_QUERY_LAYOUT,
+			  virtio_gpu_resource_query_layout_ioctl,
+			  DRM_RENDER_ALLOW),
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index dd6450179227..fe863bf07298 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -188,6 +188,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
 		vgdev->has_resource_blob = true;
 	}
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_QUERY_LAYOUT)) {
+		vgdev->has_resource_query_layout = true;
+	}
 	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
 				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
 		if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -221,8 +224,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 		 vgdev->has_host_visible ? '+' : '-',
 		 vgdev->has_host_fence_wait ? '+' : '-');
 
-	DRM_INFO("features: %ccontext_init\n",
-		 vgdev->has_context_init ? '+' : '-');
+	DRM_INFO("features: %ccontext_init %cresource_query_layout\n",
+		 vgdev->has_context_init ? '+' : '-',
+		 vgdev->has_resource_query_layout ? '+' : '-');
 
 	ret = virtio_gpu_init_vqs(vdev);
 	if (ret) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 7a4c9e30f847..8d0a2eaec11c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1333,3 +1333,59 @@ void virtio_gpu_cmd_host_wait(struct virtio_gpu_device *vgdev,
 
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
+
+static void virtio_gpu_cmd_get_resource_layout_cb(struct virtio_gpu_device *vgdev,
+						  struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_resp_resource_layout *resp =
+		(struct virtio_gpu_resp_resource_layout *)vbuf->resp_buf;
+	struct virtio_gpu_object *bo = vbuf->resp_cb_data;
+	int i;
+
+	vbuf->resp_cb_data = NULL;
+
+	if (resp->hdr.type != cpu_to_le32(VIRTIO_GPU_RESP_OK_RESOURCE_LAYOUT)) {
+		atomic_set(&bo->layout_state, STATE_ERR);
+		goto out;
+	}
+
+	bo->modifier = le64_to_cpu(resp->modifier);
+	bo->num_planes = le32_to_cpu(resp->num_planes);
+	for (i = 0; i < VIRTIO_GPU_RES_MAX_PLANES; i++) {
+		bo->planes[i].offset = le64_to_cpu(resp->planes[i].offset);
+		bo->planes[i].size = le64_to_cpu(resp->planes[i].size);
+		bo->planes[i].stride = le32_to_cpu(resp->planes[i].stride);
+	}
+	smp_wmb();
+	atomic_set(&bo->layout_state, STATE_OK);
+
+out:
+	drm_gem_object_put(&bo->base.base);
+	wake_up_all(&vgdev->resp_wq);
+}
+
+int virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev,
+				       struct virtio_gpu_object *bo)
+{
+	struct virtio_gpu_resource_query_layout *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	void *resp_buf;
+
+	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_resource_layout),
+			   GFP_KERNEL);
+	if (!resp_buf)
+		return -ENOMEM;
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, &virtio_gpu_cmd_get_resource_layout_cb, &vbuf,
+		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_resource_layout),
+		 resp_buf);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_QUERY_LAYOUT);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	drm_gem_object_get(&bo->base.base);
+	vbuf->resp_cb_data = bo;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index ac804cef737c..7ec57d7e261a 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -48,6 +48,7 @@ extern "C" {
 #define DRM_VIRTGPU_GET_CAPS  0x09
 #define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
 #define DRM_VIRTGPU_CONTEXT_INIT 0x0b
+#define DRM_VIRTGPU_RESOURCE_QUERY_LAYOUT 0x0c
 
 #define VIRTGPU_EXECBUF_FENCE_FD_IN	0x01
 #define VIRTGPU_EXECBUF_FENCE_FD_OUT	0x02
@@ -97,6 +98,7 @@ struct drm_virtgpu_execbuffer {
 #define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing  */
 #define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
 #define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
+#define VIRTGPU_PARAM_RESOURCE_QUERY_LAYOUT 8 /* DRM_VIRTGPU_RESOURCE_QUERY_LAYOUT (also needs cap) */
 
 struct drm_virtgpu_getparam {
 	__u64 param;
@@ -212,6 +214,19 @@ struct drm_virtgpu_context_init {
 	__u64 ctx_set_params;
 };
 
+#define VIRTIO_GPU_MAX_RESOURCE_PLANES 4
+struct drm_virtgpu_resource_query_layout {
+	__u32 handle;
+	__u32 num_planes;
+	__u64 modifier;
+	struct {
+		__u64 offset;
+		__u64 size;
+		__u32 stride;
+		__u32 padding;
+	} planes[VIRTIO_GPU_MAX_RESOURCE_PLANES];
+};
+
 /*
  * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
  * effect.  The event size is sizeof(drm_event), since there is no additional
@@ -262,6 +277,10 @@ struct drm_virtgpu_context_init {
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT,		\
 		struct drm_virtgpu_context_init)
 
+#define DRM_IOCTL_VIRTGPU_RESOURCE_QUERY_LAYOUT				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_QUERY_LAYOUT,	\
+		struct drm_virtgpu_resource_query_layout)
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index c9bf921303cc..c7b22861af69 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -70,6 +70,11 @@
  */
 #define VIRTIO_GPU_F_CONTEXT_FENCE_WAIT  5
 
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_QUERY_LAYOUT
+ */
+#define VIRTIO_GPU_F_RESOURCE_QUERY_LAYOUT 6
+
 enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_UNDEFINED = 0,
 
@@ -101,6 +106,7 @@ enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
 	VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
 	VIRTIO_GPU_CMD_WAIT_FENCE,
+	VIRTIO_GPU_CMD_RESOURCE_QUERY_LAYOUT,
 
 	/* cursor commands */
 	VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -114,6 +120,7 @@ enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_RESP_OK_EDID,
 	VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
 	VIRTIO_GPU_RESP_OK_MAP_INFO,
+	VIRTIO_GPU_RESP_OK_RESOURCE_LAYOUT,
 
 	/* error responses */
 	VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -474,4 +481,27 @@ struct virtio_gpu_cmd_host_wait {
 	__le64 fence_id;
 };
 
+/* VIRTIO_GPU_CMD_RESOURCE_QUERY_LAYOUT */
+struct virtio_gpu_resource_query_layout {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 padding;
+};
+
+
+/* VIRTIO_GPU_RESP_OK_RESOURCE_LAYOUT */
+#define VIRTIO_GPU_RES_MAX_PLANES 4
+struct virtio_gpu_resp_resource_layout {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le64 modifier;
+	__le32 num_planes;
+	__le32 padding;
+	struct virtio_gpu_resource_plane {
+		__le64 offset;
+		__le64 size;
+		__le32 stride;
+		__le32 padding;
+	} planes[VIRTIO_GPU_RES_MAX_PLANES];
+};
+
 #endif
-- 
2.34.1


