[PATCH 1/3] virtgpu: add virtio_gpu_queue_cleanup()

Manos Pitsidianakis manos.pitsidianakis at linaro.org
Mon May 5 08:59:14 UTC 2025


When virtio_gpu_remove() is called, the queues are flushed and used
buffers from the virtqueues are freed. However, the VIRTIO device might
have left unused buffers in the avail rings, resulting in memory leaks.
KASAN, slab debug and drm_mm_takedown all report the errors:

 BUG virtio-gpu-vbufs: Objects remaining in virtio-gpu-vbufs on
 __kmem_cache_shutdown()
 <- Snipped backtrace ->
 Object 0xffffff801b07c008 @offset=8
 Allocated in virtio_gpu_get_vbuf.isra.0+0x38/0xb0 age=4314 cpu=3
 pid=540
  kmem_cache_alloc+0x330/0x3a8
  virtio_gpu_get_vbuf.isra.0+0x38/0xb0
  virtio_gpu_cmd_resource_create_3d+0x60/0x1f0
  virtio_gpu_object_create+0x388/0x468
  virtio_gpu_resource_create_ioctl+0x1f0/0x420
  drm_ioctl_kernel+0x170/0x248
  drm_ioctl+0x33c/0x680
  __arm64_sys_ioctl+0xdc/0x128
  invoke_syscall+0x84/0x1c8
  el0_svc_common.constprop.0+0x11c/0x150
  do_el0_svc+0x38/0x50
  el0_svc+0x38/0x70
  el0t_64_sync_handler+0x120/0x130
  el0t_64_sync+0x190/0x198

 ------------[ cut here ]------------
 kmem_cache_destroy virtio-gpu-vbufs: Slab cache still has objects when
 called from virtio_gpu_free_vbufs+0x48/0x70
 WARNING: CPU: 0 PID: 483 at mm/slab_common.c:498
 kmem_cache_destroy+0x114/0x178
 <- Snipped info ->

 ------------[ cut here ]------------
 Memory manager not clean during takedown.
 <- Snipped info ->
 ---[ end trace 0000000000000000 ]---
 [drm:drm_mm_takedown] *ERROR* node [001000eb + 00000080]: inserted at
  drm_mm_insert_node_in_range+0x48c/0x6a8
  drm_vma_offset_add+0x84/0xb0
  drm_gem_create_mmap_offset+0x50/0x70
  __drm_gem_shmem_create+0x94/0x1d8
  drm_gem_shmem_create+0x1c/0x30
  virtio_gpu_object_create+0x68/0x468
  virtio_gpu_resource_create_ioctl+0x1f0/0x420
  drm_ioctl_kernel+0x170/0x248
  drm_ioctl+0x33c/0x680
  __arm64_sys_ioctl+0xdc/0x128
  invoke_syscall+0x84/0x1c8
  el0_svc_common.constprop.0+0x11c/0x150
  do_el0_svc+0x38/0x50
  el0_svc+0x38/0x70
  el0t_64_sync_handler+0x120/0x130
  el0t_64_sync+0x190/0x198
 [drm:drm_mm_takedown] *ERROR* node [0010016b + 000000eb]: inserted at
 <- Snipped info ->

The leaked objects are also reported in /sys/kernel/debug/kmemleak.

This commit adds a cleanup function that is called from
virtio_gpu_deinit().

The function cleans up any unused buffers from the virtqueues and calls
the appropriate freeing functions. This is safe to do because
virtio_gpu_deinit() calls virtio_reset_device() before calling the
cleanup function, ensuring nothing else can read from the virtqueues.

The cleanup function checks for used buffers on the queues, and
additionally calls virtqueue_detach_unused_buf on each queue to get any
buffers that did not have time to be processed by the VIRTIO backend.

Signed-off-by: Manos Pitsidianakis <manos.pitsidianakis at linaro.org>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h |  1 +
 drivers/gpu/drm/virtio/virtgpu_kms.c |  1 +
 drivers/gpu/drm/virtio/virtgpu_vq.c  | 55 ++++++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index f17660a71a3e7a22b5d4fefa6b754c227a294037..b3d367be6f204dbc98bf1c6e5c43a37ac8c0d8b3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -419,6 +419,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq);
 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
 void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_queue_cleanup(struct virtio_gpu_device *vgdev);
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
 int
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 7dfb2006c561ca13b15d979ddb8bf2d753e35dad..da70d9248072b64786a5d48b71bccaa80b8aae8f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -286,6 +286,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
 	flush_work(&vgdev->cursorq.dequeue_work);
 	flush_work(&vgdev->config_changed_work);
 	virtio_reset_device(vgdev->vdev);
+	virtio_gpu_queue_cleanup(vgdev);
 	vgdev->vdev->config->del_vqs(vgdev->vdev);
 }
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 55a15e247dd1ad53a2b43b19fca8879b956f0e1a..fd150827e413cedcec4d82b0da8d792cb67e243f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -299,6 +299,61 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 	wake_up(&vgdev->cursorq.ack_queue);
 }
 
+/* Free every in-flight vbuffer on both virtqueues during teardown. */
+void virtio_gpu_queue_cleanup(struct virtio_gpu_device *vgdev)
+{
+	struct list_head reclaim_list;
+	struct virtio_gpu_vbuffer *entry, *tmp;
+
+	INIT_LIST_HEAD(&reclaim_list);
+	spin_lock(&vgdev->ctrlq.qlock);
+	do {
+		virtqueue_disable_cb(vgdev->ctrlq.vq);
+		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
+	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
+	spin_unlock(&vgdev->ctrlq.qlock);
+	/* Detach unlocked: the device is reset, and callbacks may sleep. */
+	while ((entry = virtqueue_detach_unused_buf(vgdev->ctrlq.vq)) != NULL) {
+		if (entry->resp_cb)
+			entry->resp_cb(vgdev, entry);
+		if (entry->objs)
+			virtio_gpu_array_put_free(entry->objs);
+		free_vbuf(vgdev, entry);
+	}
+
+	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+		if (entry->resp_cb)
+			entry->resp_cb(vgdev, entry);
+		if (entry->objs)
+			virtio_gpu_array_put_free(entry->objs);
+		list_del(&entry->list);
+		free_vbuf(vgdev, entry);
+	}
+
+	spin_lock(&vgdev->cursorq.qlock);
+	do {
+		virtqueue_disable_cb(vgdev->cursorq.vq);
+		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
+	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
+	spin_unlock(&vgdev->cursorq.qlock);
+	while ((entry = virtqueue_detach_unused_buf(vgdev->cursorq.vq)) != NULL) {
+		if (entry->resp_cb)
+			entry->resp_cb(vgdev, entry);
+		if (entry->objs)
+			virtio_gpu_array_put_free(entry->objs);
+		free_vbuf(vgdev, entry);
+	}
+
+	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+		if (entry->resp_cb)
+			entry->resp_cb(vgdev, entry);
+		if (entry->objs)
+			virtio_gpu_array_put_free(entry->objs);
+		list_del(&entry->list);
+		free_vbuf(vgdev, entry);
+	}
+}
+
 /* Create sg_table from a vmalloc'd buffer. */
 static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 {

-- 
2.47.2



More information about the dri-devel mailing list