[PATCH 3/5] drm/vmwgfx: use core drm to extend/check vmw_execbuf_ioctl
Thomas Hellstrom
thellstrom at vmware.com
Fri May 24 06:05:41 UTC 2019
On Wed, 2019-05-22 at 21:09 +0200, Daniel Vetter wrote:
> On Wed, May 22, 2019 at 9:01 PM Thomas Hellstrom <
> thellstrom at vmware.com> wrote:
> > Hi, Emil,
> >
> > On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
> > > From: Emil Velikov <emil.velikov at collabora.com>
> > >
> > > Currently vmw_execbuf_ioctl() open-codes the permission checking,
> > > size
> > > extending and copying that is already done in core drm.
> > >
> > > Kill all the duplication, adding a few comments for clarity.
> >
> > Ah, there is core functionality for this now.
> >
> > What worries me though with the core approach is that the sizes are
> > not capped by the size of the kernel argument definition, which
> > allows malicious user-space to force kmallocs() of up to the maximum
> > ioctl size. Should probably be fixed before pushing this.
>
> Hm, I always worked under the assumption that kmalloc and friends
> should be hardened against userspace input. Otherwise stuff like
> kmalloc_array doesn't make any sense; everyone just feeds it unchecked
> input and expects that helper to handle overflows.
>
> If we assume kmalloc isn't hardened against that, then we have a much
> bigger problem than just vmwgfx ioctls ...
After checking the drm_ioctl code I realize that what I thought was new
behaviour has actually been around for a couple of years, so fixing it
isn't really tied to this patch series...
What caused me to react was that we previously had commit
e4fda9f264e1 ("drm: Perform ioctl command validation on the stored
kernel values")
and we seem to have lost that now, if not for the io flags then at
least for the size part. As for the size of the ioctl arguments, if the
kernel only touches a subset of the user-space specified size, I see no
reason why we should malloc / copy more than that?
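Something along these lines is what I have in mind. Completely untested
sketch, written against my recollection of the drm_ioctl() locals
(in_size, out_size, drv_size, ksize), so take the details with a grain
of salt:

/*
 * Sketch only: clamp the kernel-side copy and allocation to the size
 * the driver declared in its ioctl table, instead of trusting the
 * _IOC_SIZE() that user-space encoded into cmd.
 */
drv_size = _IOC_SIZE(ioctl->cmd);
out_size = in_size = _IOC_SIZE(cmd);
if ((cmd & ioctl->cmd & IOC_IN) == 0)
	in_size = 0;
if ((cmd & ioctl->cmd & IOC_OUT) == 0)
	out_size = 0;

/* Only copy / allocate what the kernel will actually touch. */
in_size = min(in_size, drv_size);
out_size = min(out_size, drv_size);
ksize = max(max(in_size, out_size), drv_size);

As far as I can tell that would keep the v1 zero-extension behaviour
for execbuf intact, since drv_size is the full struct
drm_vmw_execbuf_arg anyway.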
Now, given that the maximum ioctl argument size is quite limited, this
might not be a big problem, or a problem at all. Otherwise it would be
pretty easy for a malicious process to allocate most or all of a
system's resident memory.
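To put a rough number on "quite limited" (going by the generic
asm-generic/ioctl.h encoding; some architectures use 13 size bits
instead):

/*
 * _IOC_SIZEBITS = 14, so _IOC_SIZE(cmd) <= (1 << 14) - 1 = 16383 bytes,
 * i.e. each in-flight ioctl pins at most ~16 KiB of kmalloc'd argument
 * space, and only for the duration of the call.
 */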
/Thomas
> -Daniel
>
> >
> > > Cc: "VMware Graphics" <linux-graphics-maintainer at vmware.com>
> > > Cc: Thomas Hellstrom <thellstrom at vmware.com>
> > > Cc: Daniel Vetter <daniel at ffwll.ch>
> > > Signed-off-by: Emil Velikov <emil.velikov at collabora.com>
> > > ---
> > > Thomas, VMware team,
> > >
> > > Please give this some testing on your end. I've only tested it
> > > against
> > > mesa-master.
> >
> > I'll review tomorrow and do some testing. Need to see if I can dig
> > up
> > user-space apps with version 0...
> >
> > Thanks,
> >
> > Thomas
> >
> > > Thanks
> > > Emil
> > > ---
> > > drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12 +-----
> > > drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 +-
> > > drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 52 +++++++++----------------
> > > 3 files changed, 23 insertions(+), 45 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> > > index d3f108f7e52d..2cb6ae219e43 100644
> > > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> > > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> > > @@ -186,7 +186,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
> > > DRM_RENDER_ALLOW),
> > > VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
> > > DRM_AUTH | DRM_RENDER_ALLOW),
> > > - VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
> > > + VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
> > > DRM_RENDER_ALLOW),
> > > VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
> > > DRM_RENDER_ALLOW),
> > > @@ -1140,15 +1140,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
> > > &vmw_ioctls[nr - DRM_COMMAND_BASE];
> > >
> > > if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
> > > - ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
> > > - if (unlikely(ret != 0))
> > > - return ret;
> > > -
> > > - if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
> > > - goto out_io_encoding;
> > > -
> > > - return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
> > > - _IOC_SIZE(cmd));
> > > + return ioctl_func(filp, cmd, arg);
> > > } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
> > > if (!drm_is_current_master(file_priv) &&
> > > !capable(CAP_SYS_ADMIN))
> > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > > index 9be2176cc260..f5bfac85f793 100644
> > > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > > @@ -910,8 +910,8 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
> > > * Command submission - vmwgfx_execbuf.c
> > > */
> > >
> > > -extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
> > > - struct drm_file *file_priv, size_t size);
> > > +extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
> > > + struct drm_file *file_priv);
> > > extern int vmw_execbuf_process(struct drm_file *file_priv,
> > > struct vmw_private *dev_priv,
> > > void __user *user_commands,
> > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > > index 2ff7ba04d8c8..767e2b99618d 100644
> > > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > > @@ -3977,54 +3977,40 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
> > > mutex_unlock(&dev_priv->cmdbuf_mutex);
> > > }
> > >
> > > -int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
> > > - struct drm_file *file_priv, size_t size)
> > > +int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
> > > + struct drm_file *file_priv)
> > > {
> > > struct vmw_private *dev_priv = vmw_priv(dev);
> > > - struct drm_vmw_execbuf_arg arg;
> > > + struct drm_vmw_execbuf_arg *arg = data;
> > > int ret;
> > > - static const size_t copy_offset[] = {
> > > - offsetof(struct drm_vmw_execbuf_arg, context_handle),
> > > - sizeof(struct drm_vmw_execbuf_arg)};
> > > struct dma_fence *in_fence = NULL;
> > >
> > > - if (unlikely(size < copy_offset[0])) {
> > > - VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
> > > - DRM_VMW_EXECBUF);
> > > - return -EINVAL;
> > > - }
> > > -
> > > - if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
> > > - return -EFAULT;
> > > -
> > > /*
> > > * Extend the ioctl argument while maintaining backwards compatibility:
> > > - * We take different code paths depending on the value of arg.version.
> > > + * We take different code paths depending on the value of arg->version.
> > > + *
> > > + * Note: The ioctl argument is extended and zeropadded by core DRM.
> > > */
> > > - if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
> > > - arg.version == 0)) {
> > > + if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
> > > + arg->version == 0)) {
> > > VMW_DEBUG_USER("Incorrect execbuf version.\n");
> > > return -EINVAL;
> > > }
> > >
> > > - if (arg.version > 1 &&
> > > - copy_from_user(&arg.context_handle,
> > > - (void __user *) (data + copy_offset[0]),
> > > - copy_offset[arg.version - 1] - copy_offset[0]) != 0)
> > > - return -EFAULT;
> > > -
> > > - switch (arg.version) {
> > > + switch (arg->version) {
> > > case 1:
> > > - arg.context_handle = (uint32_t) -1;
> > > + /* For v1 core DRM have extended + zeropadded the data */
> > > + arg->context_handle = (uint32_t) -1;
> > > break;
> > > case 2:
> > > default:
> > > + /* For v2 and later core DRM would have correctly copied it */
> > > break;
> > > }
> > >
> > > /* If imported a fence FD from elsewhere, then wait on it */
> > > - if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
> > > - in_fence = sync_file_get_fence(arg.imported_fence_fd);
> > > + if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
> > > + in_fence = sync_file_get_fence(arg->imported_fence_fd);
> > >
> > > if (!in_fence) {
> > > VMW_DEBUG_USER("Cannot get imported
> > > fence\n");
> > > @@ -4041,11 +4027,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
> > > return ret;
> > >
> > > ret = vmw_execbuf_process(file_priv, dev_priv,
> > > - (void __user *)(unsigned long)arg.commands,
> > > - NULL, arg.command_size, arg.throttle_us,
> > > - arg.context_handle,
> > > - (void __user *)(unsigned long)arg.fence_rep,
> > > - NULL, arg.flags);
> > > + (void __user *)(unsigned long)arg->commands,
> > > + NULL, arg->command_size, arg->throttle_us,
> > > + arg->context_handle,
> > > + (void __user *)(unsigned long)arg->fence_rep,
> > > + NULL, arg->flags);
> > >
> > > ttm_read_unlock(&dev_priv->reservation_sem);
> > > if (unlikely(ret != 0))
>
>