[PATCH 2/2] drm/virtio: Modify RESOURCE_GET_LAYOUT ioctl
Julia Zhang
julia.zhang at amd.com
Fri Nov 10 07:16:32 UTC 2023
Modify the RESOURCE_GET_LAYOUT ioctl so that userspace can query the
correct stride for a guest linear resource before the resource is created.
Signed-off-by: Julia Zhang <julia.zhang at amd.com>
---
drivers/gpu/drm/virtio/virtgpu_drv.h | 26 ++++++++------
drivers/gpu/drm/virtio/virtgpu_ioctl.c | 47 ++++++++++++--------------
drivers/gpu/drm/virtio/virtgpu_vq.c | 35 +++++++++++--------
include/uapi/drm/virtgpu_drm.h | 6 ++--
include/uapi/linux/virtio_gpu.h | 8 ++---
5 files changed, 66 insertions(+), 56 deletions(-)
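
Usage note (not part of the patch): a minimal userspace sketch of the
intended pre-creation query follows. It assumes the ioctl number added in
patch 1/2 is exposed as DRM_IOCTL_VIRTGPU_RESOURCE_QUERY_LAYOUT, and the
format/bind values passed in are placeholders following the host's (virgl)
conventions; only the struct fields are taken from this patch.

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/virtgpu_drm.h>

    /* Ask the host for the preferred layout of a linear resource that has
     * not been created yet: handle == 0 skips the GEM lookup in the kernel,
     * so width/height/format/bind describe the future resource. */
    static int query_linear_stride(int drm_fd, uint32_t width, uint32_t height,
                                   uint32_t format, uint32_t bind,
                                   uint32_t *stride_out)
    {
            struct drm_virtgpu_resource_query_layout args;

            memset(&args, 0, sizeof(args));
            args.handle = 0;        /* no GEM object yet */
            args.width  = width;
            args.height = height;
            args.format = format;   /* host format enum (placeholder) */
            args.bind   = bind;     /* host bind flags (placeholder) */

            if (drmIoctl(drm_fd, DRM_IOCTL_VIRTGPU_RESOURCE_QUERY_LAYOUT, &args))
                    return -errno;

            *stride_out = args.planes[0].stride;
            return 0;
    }

With handle == 0 the kernel forwards width/height/format/bind to the host,
so the returned planes[0].stride can be used when the linear resource is
created later.
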
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d6fc0d4ecb7d..82dffb3e4c6b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -93,15 +93,6 @@ struct virtio_gpu_object {
bool host3d_blob, guest_blob;
uint32_t blob_mem, blob_flags;
- atomic_t layout_state;
- struct {
- uint64_t offset;
- uint64_t size;
- uint32_t stride;
- } planes[VIRTIO_GPU_RES_MAX_PLANES];
- uint64_t modifier;
- uint32_t num_planes;
-
int uuid_state;
uuid_t uuid;
@@ -225,6 +216,16 @@ struct virtio_gpu_drv_cap_cache {
atomic_t is_valid;
};
+struct virtio_gpu_query_info {
+ uint32_t num_planes;
+ uint64_t modifier;
+ struct {
+ uint64_t offset;
+ uint32_t stride;
+ } planes[VIRTIO_GPU_MAX_RESOURCE_PLANES];
+ atomic_t is_valid;
+};
+
struct virtio_gpu_device {
struct drm_device *ddev;
@@ -448,7 +449,12 @@ void virtio_gpu_cmd_host_wait(struct virtio_gpu_device *vgdev,
int
virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo);
+ struct virtio_gpu_query_info *bo_info,
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ uint32_t bind,
+ uint32_t hw_res_handle);
/* virtgpu_display.c */
int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 51d04460d0d8..034a7c0927a5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -685,9 +685,9 @@ static int virtio_gpu_resource_query_layout_ioctl(struct drm_device *dev,
{
struct drm_virtgpu_resource_query_layout *args = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct drm_gem_object *obj;
- struct virtio_gpu_object *bo;
- int layout_state;
+ struct drm_gem_object *obj = NULL;
+ struct virtio_gpu_object *bo = NULL;
+ struct virtio_gpu_query_info bo_info = {0};
int ret = 0;
int i;
@@ -696,50 +696,45 @@ static int virtio_gpu_resource_query_layout_ioctl(struct drm_device *dev,
return -EINVAL;
}
- obj = drm_gem_object_lookup(file, args->handle);
- if (obj == NULL) {
- DRM_ERROR("invalid handle 0x%x\n", args->handle);
- return -ENOENT;
- }
- bo = gem_to_virtio_gpu_obj(obj);
-
- layout_state = atomic_read(&bo->layout_state);
- if (layout_state == STATE_ERR) {
- ret = -EINVAL;
- goto out;
- } else if (layout_state == STATE_OK) {
- goto valid;
+ if (args->handle > 0) {
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("invalid handle 0x%x\n", args->handle);
+ return -ENOENT;
+ }
+ bo = gem_to_virtio_gpu_obj(obj);
}
- ret = virtio_gpu_cmd_get_resource_layout(vgdev, bo);
+ ret = virtio_gpu_cmd_get_resource_layout(vgdev, &bo_info, args->width,
+ args->height, args->format,
+ args->bind, bo ? bo->hw_res_handle : 0);
if (ret)
goto out;
ret = wait_event_timeout(vgdev->resp_wq,
- atomic_read(&bo->layout_state) == STATE_OK,
+ atomic_read(&bo_info.is_valid),
5 * HZ);
if (!ret)
goto out;
valid:
smp_rmb();
- WARN_ON(atomic_read(&bo->layout_state) != STATE_OK);
- args->num_planes = bo->num_planes;
- args->modifier = bo->modifier;
+ WARN_ON(!atomic_read(&bo_info.is_valid));
+ args->num_planes = bo_info.num_planes;
+ args->modifier = bo_info.modifier;
for (i = 0; i < args->num_planes; i++) {
- args->planes[i].offset = bo->planes[i].offset;
- args->planes[i].size = bo->planes[i].size;
- args->planes[i].stride = bo->planes[i].stride;
+ args->planes[i].offset = bo_info.planes[i].offset;
+ args->planes[i].stride = bo_info.planes[i].stride;
}
for (; i < VIRTIO_GPU_MAX_RESOURCE_PLANES; i++) {
args->planes[i].offset = 0;
- args->planes[i].size = 0;
args->planes[i].stride = 0;
}
ret = 0;
out:
- drm_gem_object_put(obj);
+ if (obj)
+ drm_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 8d0a2eaec11c..95da6d0008f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1339,33 +1339,36 @@ static void virtio_gpu_cmd_get_resource_layout_cb(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_resp_resource_layout *resp =
(struct virtio_gpu_resp_resource_layout *)vbuf->resp_buf;
- struct virtio_gpu_object *bo = vbuf->resp_cb_data;
+ struct virtio_gpu_query_info *bo_info = vbuf->resp_cb_data;
int i;
vbuf->resp_cb_data = NULL;
if (resp->hdr.type != VIRTIO_GPU_RESP_OK_RESOURCE_LAYOUT) {
- atomic_set(&bo->layout_state, STATE_ERR);
+ atomic_set(&bo_info->is_valid, 0);
goto out;
}
- bo->modifier = le64_to_cpu(resp->modifier);
- bo->num_planes = le32_to_cpu(resp->num_planes);
- for (i = 0; i < VIRTIO_GPU_RES_MAX_PLANES; i++) {
- bo->planes[i].offset = le64_to_cpu(resp->planes[i].offset);
- bo->planes[i].size = le64_to_cpu(resp->planes[i].size);
- bo->planes[i].stride = le32_to_cpu(resp->planes[i].stride);
+ bo_info->modifier = le64_to_cpu(resp->modifier);
+ bo_info->num_planes = le32_to_cpu(resp->num_planes);
+ for (i = 0; i < bo_info->num_planes; i++) {
+ bo_info->planes[i].stride = le32_to_cpu(resp->planes[i].stride);
+ bo_info->planes[i].offset = le64_to_cpu(resp->planes[i].offset);
}
smp_wmb();
- atomic_set(&bo->layout_state, STATE_OK);
+ atomic_set(&bo_info->is_valid, 1);
out:
- drm_gem_object_put(&bo->base.base);
wake_up_all(&vgdev->resp_wq);
}
int virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo)
+ struct virtio_gpu_query_info *bo_info,
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ uint32_t bind,
+ uint32_t hw_res_handle)
{
struct virtio_gpu_resource_query_layout *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -1383,9 +1386,13 @@ int virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev,
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_QUERY_LAYOUT);
- cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- drm_gem_object_get(&bo->base.base);
- vbuf->resp_cb_data = bo;
+ cmd_p->resource_id = cpu_to_le32(hw_res_handle);
+ cmd_p->width = cpu_to_le32(width);
+ cmd_p->height = cpu_to_le32(height);
+ cmd_p->format = cpu_to_le32(format);
+ cmd_p->bind = cpu_to_le32(bind);
+ vbuf->resp_cb_data = bo_info;
+
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
return 0;
}
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 7ec57d7e261a..41f9c592aeaf 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -217,13 +217,15 @@ struct drm_virtgpu_context_init {
#define VIRTIO_GPU_MAX_RESOURCE_PLANES 4
struct drm_virtgpu_resource_query_layout {
__u32 handle;
+ __u32 width;
+ __u32 height;
+ __u32 format;
+ __u32 bind;
__u32 num_planes;
__u64 modifier;
struct {
__u64 offset;
- __u64 size;
__u32 stride;
- __u32 padding;
} planes[VIRTIO_GPU_MAX_RESOURCE_PLANES];
};
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index c7b22861af69..3e653f018dd7 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -485,7 +485,10 @@ struct virtio_gpu_cmd_host_wait {
struct virtio_gpu_resource_query_layout {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
- __le32 padding;
+ __le32 width;
+ __le32 height;
+ __le32 format;
+ __le32 bind;
};
@@ -495,12 +498,9 @@ struct virtio_gpu_resp_resource_layout {
struct virtio_gpu_ctrl_hdr hdr;
__le64 modifier;
__le32 num_planes;
- __le32 padding;
struct virtio_gpu_resource_plane {
__le64 offset;
- __le64 size;
__le32 stride;
- __le32 padding;
} planes[VIRTIO_GPU_RES_MAX_PLANES];
};
--
2.34.1