[PATCH 1/2] drm: Drop dev->event_lock spinlock around faulting copy_to_user()

Chris Wilson chris at chris-wilson.co.uk
Wed Nov 25 06:39:02 PST 2015


In

commit cdd1cf799bd24ac0a4184549601ae302267301c5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Dec 4 21:03:25 2014 +0000

    drm: Make drm_read() more robust against multithreaded races

I fixed the races by serialising use of the event under an extended hold
of dev->event_lock. However, as Thomas pointed out, the copy_to_user()
may fault (even the __copy_to_user_inatomic() variant used here), and
calling into the driver backend with the spinlock held is bad news.
Therefore we have to drop the spinlock before the copy, but that exposes
us to the old race whereby a second reader could see an out-of-order
event: the first reader may claim the first event but then fail to copy
it to userspace, and by the time it is returned to the event list it
will be behind the event currently being copied by the second reader.
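
For reference, the new flow is roughly the following (a simplified
userspace sketch with hypothetical names, using a pthread mutex in place
of dev->event_lock and a plain singly-linked list in place of the kernel
list): claim one event under the lock, drop the lock for the potentially
faulting copy, and put the event back at the head of the list if the
copy fails.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct event {
        struct event *next;
        size_t length;          /* length <= sizeof(payload) */
        char payload[64];
    };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct event *queue_head;

    /* Stand-in for copy_to_user(): may fail, e.g. on a bad buffer. */
    static bool copy_out(char *dst, const struct event *e)
    {
        if (dst == NULL)
            return false;
        memcpy(dst, e->payload, e->length);
        return true;
    }

    /* Copy one queued event to 'dst'; returns bytes copied, 0 if none. */
    static size_t read_one_event(char *dst)
    {
        struct event *e = NULL;

        pthread_mutex_lock(&queue_lock);
        if (queue_head != NULL) {
            e = queue_head;         /* claim the event under the lock */
            queue_head = e->next;
        }
        pthread_mutex_unlock(&queue_lock);

        if (e == NULL)
            return 0;

        /* The copy runs without the lock held and may "fault". */
        if (!copy_out(dst, e)) {
            /*
             * Put the event back at the head so ordering is preserved
             * for this reader; a second reader may still have raced
             * past us, which is the remaining race described above.
             */
            pthread_mutex_lock(&queue_lock);
            e->next = queue_head;
            queue_head = e;
            pthread_mutex_unlock(&queue_lock);
            return 0;
        }

        /* e->destroy(e) in the kernel; freeing is elided here. */
        return e->length;
    }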

Reported-by: Thomas Hellstrom <thellstrom at vmware.com>
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Thomas Hellstrom <thellstrom at vmware.com>
Cc: Takashi Iwai <tiwai at suse.de>
Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 drivers/gpu/drm/drm_fops.c | 40 ++++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..eb8702d39e7d 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -488,9 +488,19 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
 	if (!access_ok(VERIFY_WRITE, buffer, count))
 		return -EFAULT;
 
-	spin_lock_irq(&dev->event_lock);
 	for (;;) {
-		if (list_empty(&file_priv->event_list)) {
+		struct drm_pending_event *e = NULL;
+
+		spin_lock_irq(&dev->event_lock);
+		if (!list_empty(&file_priv->event_list)) {
+			e = list_first_entry(&file_priv->event_list,
+					struct drm_pending_event, link);
+			file_priv->event_space += e->event->length;
+			list_del(&e->link);
+		}
+		spin_unlock_irq(&dev->event_lock);
+
+		if (e == NULL) {
 			if (ret)
 				break;
 
@@ -499,36 +509,34 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
 				break;
 			}
 
-			spin_unlock_irq(&dev->event_lock);
 			ret = wait_event_interruptible(file_priv->event_wait,
 						       !list_empty(&file_priv->event_list));
-			spin_lock_irq(&dev->event_lock);
 			if (ret < 0)
 				break;
 
 			ret = 0;
 		} else {
-			struct drm_pending_event *e;
-
-			e = list_first_entry(&file_priv->event_list,
-					     struct drm_pending_event, link);
-			if (e->event->length + ret > count)
+			unsigned length = e->event->length;
+
+			if (length > count - ret) {
+put_back_event:
+				spin_lock_irq(&dev->event_lock);
+				file_priv->event_space -= length;
+				list_add(&e->link, &file_priv->event_list);
+				spin_unlock_irq(&dev->event_lock);
 				break;
+			}
 
-			if (__copy_to_user_inatomic(buffer + ret,
-						    e->event, e->event->length)) {
+			if (copy_to_user(buffer + ret, e->event, length)) {
 				if (ret == 0)
 					ret = -EFAULT;
-				break;
+				goto put_back_event;
 			}
 
-			file_priv->event_space += e->event->length;
-			ret += e->event->length;
-			list_del(&e->link);
+			ret += length;
 			e->destroy(e);
 		}
 	}
-	spin_unlock_irq(&dev->event_lock);
 
 	return ret;
 }
-- 
2.6.2
