[Intel-gfx] [PATCH] drm: Make drm_read() more robust against multithreaded races
Takashi Iwai
tiwai at suse.de
Fri Dec 5 00:42:35 PST 2014
At Thu, 4 Dec 2014 21:03:25 +0000,
Chris Wilson wrote:
>
> The current implementation of drm_read() faces a number of issues:
>
> 1. Upon an error, it consumes the event which may lead to the client
> blocking.
> 2. Upon an error, it forgets about events already copied
> 3. If it fails to copy a single event with O_NONBLOCK it falls into an
> infinite loop of reporting EAGAIN.
> 4. There is a race between multiple waiters and blocking reads of the
> events list.
>
> Here, we inline drm_dequeue_event() into drm_read() so that we can take
> the spinlock around the list walking and event copying, and importantly
> reorder the error handling to avoid the issues above.
>
> Cc: Takashi Iwai <tiwai at suse.de>
Reviewed-by: Takashi Iwai <tiwai at suse.de>
Takashi
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/drm_fops.c | 90 ++++++++++++++++++++++------------------------
> 1 file changed, 43 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
> index 91e1105f2800..076dd606b580 100644
> --- a/drivers/gpu/drm/drm_fops.c
> +++ b/drivers/gpu/drm/drm_fops.c
> @@ -478,63 +478,59 @@ int drm_release(struct inode *inode, struct file *filp)
> }
> EXPORT_SYMBOL(drm_release);
>
> -static bool
> -drm_dequeue_event(struct drm_file *file_priv,
> - size_t total, size_t max, struct drm_pending_event **out)
> +ssize_t drm_read(struct file *filp, char __user *buffer,
> + size_t count, loff_t *offset)
> {
> + struct drm_file *file_priv = filp->private_data;
> struct drm_device *dev = file_priv->minor->dev;
> - struct drm_pending_event *e;
> - unsigned long flags;
> - bool ret = false;
> -
> - spin_lock_irqsave(&dev->event_lock, flags);
> + ssize_t ret = 0;
>
> - *out = NULL;
> - if (list_empty(&file_priv->event_list))
> - goto out;
> - e = list_first_entry(&file_priv->event_list,
> - struct drm_pending_event, link);
> - if (e->event->length + total > max)
> - goto out;
> + if (!access_ok(VERIFY_WRITE, buffer, count))
> + return -EFAULT;
>
> - file_priv->event_space += e->event->length;
> - list_del(&e->link);
> - *out = e;
> - ret = true;
> + spin_lock_irq(&dev->event_lock);
> + for (;;) {
> + if (list_empty(&file_priv->event_list)) {
> + if (ret)
> + break;
>
> -out:
> - spin_unlock_irqrestore(&dev->event_lock, flags);
> - return ret;
> -}
> -
> -ssize_t drm_read(struct file *filp, char __user *buffer,
> - size_t count, loff_t *offset)
> -{
> - struct drm_file *file_priv = filp->private_data;
> - struct drm_pending_event *e;
> - size_t total;
> - ssize_t ret;
> + if (filp->f_flags & O_NONBLOCK) {
> + ret = -EAGAIN;
> + break;
> + }
>
> - if ((filp->f_flags & O_NONBLOCK) == 0) {
> - ret = wait_event_interruptible(file_priv->event_wait,
> - !list_empty(&file_priv->event_list));
> - if (ret < 0)
> - return ret;
> - }
> + spin_unlock_irq(&dev->event_lock);
> + ret = wait_event_interruptible(file_priv->event_wait,
> + !list_empty(&file_priv->event_list));
> + spin_lock_irq(&dev->event_lock);
> + if (ret < 0)
> + break;
> +
> + ret = 0;
> + } else {
> + struct drm_pending_event *e;
> +
> + e = list_first_entry(&file_priv->event_list,
> + struct drm_pending_event, link);
> + if (e->event->length + ret > count)
> + break;
> +
> + if (__copy_to_user_inatomic(buffer + ret,
> + e->event, e->event->length)) {
> + if (ret == 0)
> + ret = -EFAULT;
> + break;
> + }
>
> - total = 0;
> - while (drm_dequeue_event(file_priv, total, count, &e)) {
> - if (copy_to_user(buffer + total,
> - e->event, e->event->length)) {
> - total = -EFAULT;
> - break;
> + file_priv->event_space += e->event->length;
> + ret += e->event->length;
> + list_del(&e->link);
> + e->destroy(e);
> }
> -
> - total += e->event->length;
> - e->destroy(e);
> }
> + spin_unlock_irq(&dev->event_lock);
>
> - return total ?: -EAGAIN;
> + return ret;
> }
> EXPORT_SYMBOL(drm_read);
>
> --
> 2.1.3
>
More information about the Intel-gfx
mailing list