[PATCH 2/2] drm/gud: Use scatter-gather USB bulk transfer

Noralf Trønnes <noralf@tronnes.org>
Mon Mar 29 18:01:20 UTC 2021


There is a limit to how big a kmalloc buffer can be, and as memory gets
fragmented it becomes more difficult to get big contiguous buffers. The
downside of smaller buffers is that the driver has to split the transfer
up, which hampers performance. Compression might also take a hit because
of the splitting.

Solve this by allocating the transfer buffer using vmalloc and creating
an SG table that is passed on to the USB subsystem. vmalloc_32() is used
to avoid DMA bounce buffers on USB controllers that can only access
32-bit addresses.

This also solves the problem that split transfers can cause host side
tearing, since flushing is decoupled from rendering.
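
Unlike usb_bulk_msg(), usb_sg_wait() has no built-in timeout, so the
flush path arms a timer that cancels the request if the device does not
complete it in time. Again as a sketch with illustrative names (the real
version is gud_usb_bulk() in the patch):

#include <linux/timer.h>
#include <linux/usb.h>

struct sketch_bulk_ctx {
	struct timer_list timer;
	struct usb_sg_request sgr;
};

static void sketch_bulk_timeout(struct timer_list *t)
{
	struct sketch_bulk_ctx *ctx = from_timer(ctx, t, timer);

	usb_sg_cancel(&ctx->sgr);	/* abort the transfer on timeout */
}

/* Illustrative only; see gud_usb_bulk() in the patch. */
static int sketch_bulk_send(struct usb_device *usb, unsigned int pipe,
			    struct sg_table *sgt, size_t len)
{
	struct sketch_bulk_ctx ctx;
	int ret;

	ret = usb_sg_init(&ctx.sgr, usb, pipe, 0, sgt->sgl, sgt->nents, len, GFP_KERNEL);
	if (ret)
		return ret;

	timer_setup_on_stack(&ctx.timer, sketch_bulk_timeout, 0);
	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

	usb_sg_wait(&ctx.sgr);			/* blocks until completion or cancel */

	if (!del_timer_sync(&ctx.timer))	/* timer already fired: we timed out */
		ret = -ETIMEDOUT;
	else if (ctx.sgr.status < 0)
		ret = ctx.sgr.status;
	else if (ctx.sgr.bytes != len)
		ret = -EIO;

	destroy_timer_on_stack(&ctx.timer);
	return ret;
}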

Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
---
 drivers/gpu/drm/gud/gud_drv.c      | 49 +++++++++++++++++++++---------
 drivers/gpu/drm/gud/gud_internal.h |  2 ++
 drivers/gpu/drm/gud/gud_pipe.c     | 47 ++++++++++++++++++++++++----
 3 files changed, 77 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 820c7331b3b3..8f9bcf6561e8 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -394,13 +394,40 @@ static const struct drm_driver gud_drm_driver = {
 	.minor			= 0,
 };
 
+static int gud_alloc_bulk_buffer(struct gud_device *gdrm)
+{
+	unsigned int i, num_pages;
+	struct page **pages;
+	void *ptr;
+	int ret;
+
+	gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len);
+	if (!gdrm->bulk_buf)
+		return -ENOMEM;
+
+	num_pages = PAGE_ALIGN(gdrm->bulk_len) >> PAGE_SHIFT;
+	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE)
+		pages[i] = vmalloc_to_page(ptr);
+
+	ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages,
+					0, gdrm->bulk_len, GFP_KERNEL);
+	kfree(pages);
+
+	return ret;
+}
+
 static void gud_free_buffers_and_mutex(void *data)
 {
 	struct gud_device *gdrm = data;
 
 	vfree(gdrm->compress_buf);
 	gdrm->compress_buf = NULL;
-	kfree(gdrm->bulk_buf);
+	sg_free_table(&gdrm->bulk_sgt);
+	vfree(gdrm->bulk_buf);
 	gdrm->bulk_buf = NULL;
 	mutex_destroy(&gdrm->ctrl_lock);
 	mutex_destroy(&gdrm->damage_lock);
@@ -538,24 +565,16 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
 	if (desc.max_buffer_size)
 		max_buffer_size = le32_to_cpu(desc.max_buffer_size);
-retry:
-	/*
-	 * Use plain kmalloc here since devm_kmalloc() places struct devres at the beginning
-	 * of the buffer it allocates. This wastes a lot of memory when allocating big buffers.
-	 * Asking for 2M would actually allocate 4M. This would also prevent getting the biggest
-	 * possible buffer potentially leading to split transfers.
-	 */
-	gdrm->bulk_buf = kmalloc(max_buffer_size, GFP_KERNEL | __GFP_NOWARN);
-	if (!gdrm->bulk_buf) {
-		max_buffer_size = roundup_pow_of_two(max_buffer_size) / 2;
-		if (max_buffer_size < SZ_512K)
-			return -ENOMEM;
-		goto retry;
-	}
+	if (max_buffer_size > SZ_64M)
+		max_buffer_size = SZ_64M; /* safeguard */
 
 	gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out));
 	gdrm->bulk_len = max_buffer_size;
 
+	ret = gud_alloc_bulk_buffer(gdrm);
+	if (ret)
+		return ret;
+
 	if (gdrm->compression & GUD_COMPRESSION_LZ4) {
 		gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL);
 		if (!gdrm->lz4_comp_mem)
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index de2f2d2dbc60..1bb65a46c347 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -5,6 +5,7 @@
 
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 #include <linux/usb.h>
 #include <linux/workqueue.h>
 #include <uapi/drm/drm_fourcc.h>
@@ -26,6 +27,7 @@ struct gud_device {
 	unsigned int bulk_pipe;
 	void *bulk_buf;
 	size_t bulk_len;
+	struct sg_table bulk_sgt;
 
 	u8 compression;
 	void *lz4_comp_mem;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 2f83ab6b8e61..7dd63a8c7c2d 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -220,13 +220,51 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
 	return ret;
 }
 
+struct gud_usb_bulk_context {
+	struct timer_list timer;
+	struct usb_sg_request sgr;
+};
+
+static void gud_usb_bulk_timeout(struct timer_list *t)
+{
+	struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);
+
+	usb_sg_cancel(&ctx->sgr);
+}
+
+static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
+{
+	struct gud_usb_bulk_context ctx;
+	int ret;
+
+	ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0,
+			  gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0);
+	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));
+
+	usb_sg_wait(&ctx.sgr);
+
+	if (!del_timer_sync(&ctx.timer))
+		ret = -ETIMEDOUT;
+	else if (ctx.sgr.status < 0)
+		ret = ctx.sgr.status;
+	else if (ctx.sgr.bytes != len)
+		ret = -EIO;
+
+	destroy_timer_on_stack(&ctx.timer);
+
+	return ret;
+}
+
 static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
 			  const struct drm_format_info *format, struct drm_rect *rect)
 {
-	struct usb_device *usb = gud_to_usb_device(gdrm);
 	struct gud_set_buffer_req req;
-	int ret, actual_length;
 	size_t len, trlen;
+	int ret;
 
 	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
 
@@ -255,10 +293,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
 			return ret;
 	}
 
-	ret = usb_bulk_msg(usb, gdrm->bulk_pipe, gdrm->bulk_buf, trlen,
-			   &actual_length, msecs_to_jiffies(3000));
-	if (!ret && trlen != actual_length)
-		ret = -EIO;
+	ret = gud_usb_bulk(gdrm, trlen);
 	if (ret)
 		gdrm->stats_num_errors++;
 
-- 
2.23.0


