[PATCH 2/2] drm/virtio: rely on drm shmem helpers to take care of dma_map/dma_unmap

Gurchetan Singh <gurchetansingh@chromium.org>
Fri Jun 12 01:36:25 UTC 2020


Fixes a double-free regression:

[    4.357928]  drm_gem_shmem_free_object+0xb4/0x100
[    4.358983]  virtio_gpu_dequeue_ctrl_func+0xd9/0x290
[    4.360343]  process_one_work+0x1d2/0x3a0
[    4.361581]  worker_thread+0x45/0x3c0
[    4.362645]  kthread+0xf6/0x130
[    4.363543]  ? process_one_work+0x3a0/0x3a0
[    4.364770]  ? kthread_park+0x80/0x80
[    4.365799]  ret_from_fork+0x35/0x40
[    4.367103] Modules linked in:
[    4.367958] CR2: 0000000000000018
[    4.368857] ---[ end trace db84f7a2974d5c79 ]---
[    4.370118] RIP: 0010:dma_direct_unmap_sg+0x1f/0x60

Alternatively, we could revert to the prior behavior instead.
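
For context, the idea here is that the drm shmem helper owns the dma mapping of
the backing pages, so the driver only consumes the mapped sg table and never
tears it down itself. A minimal sketch of a consumer under that model (the
function below is hypothetical and not part of this patch; it assumes
drm_gem_shmem_get_pages_sgt() performs the mapping and
drm_gem_shmem_free_object() undoes it, as in current drm shmem helpers):

	static int example_get_backing(struct virtio_gpu_object *bo,
				       struct sg_table **sgt_out)
	{
		/* The helper allocates and dma-maps the sg table once and
		 * caches it; repeated calls return the same table. */
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);

		if (IS_ERR(sgt))
			return PTR_ERR(sgt);

		/* Only read the table here; do not dma_unmap_sg() or
		 * sg_free_table() it, since drm_gem_shmem_free_object()
		 * already does that on object teardown. */
		*sgt_out = sgt;
		return 0;
	}

Having a single owner of the mapping is what avoids the double teardown seen in
the trace above.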

Fixes: d323bb44e4d2 ("drm/virtio: Call the right shmem helpers")
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h    |  1 -
 drivers/gpu/drm/virtio/virtgpu_object.c | 25 ++++++-------------------
 drivers/gpu/drm/virtio/virtgpu_vq.c     | 12 ++++++------
 3 files changed, 12 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 49bebdee6d91..66af5ea1304b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -78,7 +78,6 @@ struct virtio_gpu_object {
 struct virtio_gpu_object_shmem {
 	struct virtio_gpu_object base;
 	struct sg_table *pages;
-	uint32_t mapped;
 };
 
 #define to_virtio_gpu_shmem(virtio_gpu_object) \
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 346cef5ce251..ec42c5532910 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -69,16 +69,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 	if (virtio_gpu_is_shmem(bo)) {
 		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
-
 		if (shmem->pages) {
-			if (shmem->mapped) {
-				dma_unmap_sg(vgdev->vdev->dev.parent,
-					     shmem->pages->sgl, shmem->mapped,
-					     DMA_TO_DEVICE);
-				shmem->mapped = 0;
-			}
-
-			sg_free_table(shmem->pages);
 			shmem->pages = NULL;
 			drm_gem_shmem_unpin(&bo->base.base);
 		}
@@ -123,6 +114,7 @@ bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 						size_t size)
 {
+	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_object_shmem *shmem;
 	struct drm_gem_shmem_object *dshmem;
 
@@ -133,6 +125,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 	dshmem = &shmem->base.base;
 	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
 	dshmem->map_cached = true;
+	dshmem->skip_dma_api = virtio_has_iommu_quirk(vgdev->vdev);
 	return &dshmem->base;
 }
 
@@ -141,7 +134,6 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 					struct virtio_gpu_mem_entry **ents,
 					unsigned int *nents)
 {
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 	struct scatterlist *sg;
 	int si, ret;
@@ -156,15 +148,10 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 		return -EINVAL;
 	}
 
-	if (use_dma_api) {
-		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-					   shmem->pages->sgl,
-					   shmem->pages->nents,
-					   DMA_TO_DEVICE);
-		*nents = shmem->mapped;
-	} else {
+	if (!bo->base.skip_dma_api)
+		*nents = bo->base.dma_map_count;
+	else
 		*nents = shmem->pages->nents;
-	}
 
 	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
 			      GFP_KERNEL);
@@ -174,7 +161,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 	}
 
 	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
-		(*ents)[si].addr = cpu_to_le64(use_dma_api
+		(*ents)[si].addr = cpu_to_le64(!bo->base.skip_dma_api
 					       ? sg_dma_address(sg)
 					       : sg_phys(sg));
 		(*ents)[si].length = cpu_to_le32(sg->length);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 9e663a5d9952..117e4aa69ae5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -599,12 +599,12 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-	if (use_dma_api)
+	if (!bo->base.skip_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       shmem->pages->sgl, shmem->pages->nents,
+				       shmem->pages->sgl,
+				       bo->base.dma_map_count,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1015,12 +1015,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-	if (use_dma_api)
+	if (!bo->base.skip_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       shmem->pages->sgl, shmem->pages->nents,
+				       shmem->pages->sgl,
+				       bo->base.dma_map_count,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-- 
2.25.1


