[PATCH v2 20/23] drm/virtio: implement blob resources: fix stride discrepancy
Gurchetan Singh
gurchetansingh at chromium.org
Wed Sep 2 21:08:44 UTC 2020
The old transfer ioctls may work on blob resources, and the guest
may have an image view of the blob resource such that the stride is
not equal to width * bytes_per_pixel.
For host-only blobs, we can repurpose the transfer ioctls to synchronize
caches as well.
Also, with seamless Wayland integration between guest and host
looking increasingly attractive, it makes sense to keep track of a
single stride value.
Signed-off-by: Gurchetan Singh <gurchetansingh at chromium.org>
Acked-by: Tomeu Vizoso <tomeu.vizoso at collabora.com>
---
drivers/gpu/drm/virtio/virtgpu_drv.h | 4 +++
drivers/gpu/drm/virtio/virtgpu_ioctl.c | 35 +++++++++++++++++++++++---
drivers/gpu/drm/virtio/virtgpu_vq.c | 14 +++++++++--
3 files changed, 47 insertions(+), 6 deletions(-)
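Usage illustration (not part of the patch itself): a rough userspace
sketch of how the extended transfer ioctl might be driven with an
explicit stride. It assumes the stride/layer_stride fields added to
the UAPI transfer structs earlier in this series, an already-created
host3d blob with handle bo_handle, and installed uapi headers at
drm/virtgpu_drm.h; the helper name flush_rect is made up for the
example.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/virtgpu_drm.h>  /* header path is an assumption */

    /* Flush a sub-rectangle of a blob whose row pitch is not w * bpp. */
    static int flush_rect(int drm_fd, uint32_t bo_handle,
                          uint32_t x, uint32_t y, uint32_t w, uint32_t h,
                          uint32_t stride /* bytes per row, may be > w * bpp */)
    {
            struct drm_virtgpu_3d_transfer_to_host xfer;

            memset(&xfer, 0, sizeof(xfer));  /* level/offset/box.z stay 0 */
            xfer.bo_handle = bo_handle;
            xfer.box.x = x;
            xfer.box.y = y;
            xfer.box.w = w;
            xfer.box.h = h;
            xfer.box.d = 1;
            xfer.stride = stride;  /* forwarded to the host by this patch */

            /*
             * The kernel returns -EINVAL if stride/layer_stride are set
             * on a resource that is not a host3d blob; for host-only
             * blobs this call doubles as a cache synchronization.
             */
            return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
    }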
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 689297e1fff87..6162865c162df 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -351,12 +351,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 7a2430e34e003..0075995a9f5b8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -311,6 +311,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
@@ -324,6 +325,17 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -333,9 +345,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
ret = -ENOMEM;
goto err_unlock;
}
+
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, vfpriv->ctx_id, offset, args->level,
- &args->box, objs, fence);
+ (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
+ args->layer_stride, &args->box, objs, fence);
dma_fence_put(&fence->f);
virtio_gpu_notify(vgdev);
return 0;
@@ -353,6 +366,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
@@ -362,6 +376,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, offset,
@@ -369,6 +389,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
objs, NULL);
} else {
virtio_gpu_create_context(dev, file);
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -380,8 +406,9 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_transfer_to_host_3d
(vgdev,
- vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &args->box, objs, fence);
+ vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+ args->stride, args->layer_stride, &args->box, objs,
+ fence);
dma_fence_put(&fence->f);
}
virtio_gpu_notify(vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index b77d405e56b28..363e30e19db4c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1012,6 +1012,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
@@ -1020,12 +1022,14 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
+ if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
+ }
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1038,6 +1042,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
+ cmd_p->stride = cpu_to_le32(stride);
+ cmd_p->layer_stride = cpu_to_le32(layer_stride);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
@@ -1045,6 +1051,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
@@ -1064,6 +1072,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
+ cmd_p->stride = cpu_to_le32(stride);
+ cmd_p->layer_stride = cpu_to_le32(layer_stride);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
--
2.26.2