[PATCH 25/25] drm: move dev data clearing from drm_setup to lastclose

David Herrmann dh.herrmann at gmail.com
Tue Aug 13 11:12:05 PDT 2013


Hi

On Thu, Aug 8, 2013 at 3:41 PM, Daniel Vetter <daniel.vetter at ffwll.ch> wrote:
> We kzalloc this structure, and for real kms devices we should never
> lose track of things really.
>
> But ums/legacy drivers rely on the drm core to clean up a bit of cruft
> between lastclose and firstopen (i.e. when X is being restarted), so
> keep this around. But give it a clear drm_legacy_ prefix and
> conditionalize the code on !DRIVER_MODESET.
>
> Cc: David Herrmann <dh.herrmann at gmail.com>
> Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>

Reviewed-by: David Herrmann <dh.herrmann at gmail.com>

Regards
David
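
For context on the !DRIVER_MODESET gating described in the commit message
above: the check keys off the driver_features bitmask that every drm_driver
declares. Below is a minimal sketch (not part of the patch; the driver names
and feature combinations are made up for illustration) of how a KMS driver
ends up skipping drm_legacy_dev_reinit() entirely, while a legacy/UMS driver
without the flag still has its per-device bookkeeping cleared between
lastclose and the next firstopen.

#include <drm/drmP.h>   /* struct drm_driver and the DRIVER_* feature flags */

/*
 * Hypothetical KMS driver: DRIVER_MODESET is set, so
 * drm_core_check_feature(dev, DRIVER_MODESET) is true and
 * drm_legacy_dev_reinit() returns early without touching the device.
 */
static struct drm_driver example_kms_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM,
        /* .fops, .ioctls, ... */
};

/*
 * Hypothetical legacy/UMS driver: DRIVER_MODESET is not set, so the core
 * still resets ioctl_count, vma_count, dev->counts[], sigdata.lock and the
 * context bookkeeping on lastclose (i.e. across an X server restart).
 */
static struct drm_driver example_legacy_driver = {
        .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA,
        /* .fops, .ioctls, ... */
};

With the feature check inside drm_legacy_dev_reinit() itself, drm_lastclose()
can call it unconditionally, as the patch does, and KMS drivers simply fall
through.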

> ---
>  drivers/gpu/drm/drm_drv.c  | 27 +++++++++++++++++++++++++++
>  drivers/gpu/drm/drm_fops.c | 27 +++------------------------
>  2 files changed, 30 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
> index a845f31..3b8d044 100644
> --- a/drivers/gpu/drm/drm_drv.c
> +++ b/drivers/gpu/drm/drm_drv.c
> @@ -171,6 +171,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
>  #define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
>
>  /**
> + * drm_legacy_dev_reinit
> + *
> + * Reinitializes a legacy/ums drm device in its lastclose function.
> + */
> +static void drm_legacy_dev_reinit(struct drm_device *dev)
> +{
> +       int i;
> +
> +       if (drm_core_check_feature(dev, DRIVER_MODESET))
> +               return;
> +
> +       atomic_set(&dev->ioctl_count, 0);
> +       atomic_set(&dev->vma_count, 0);
> +
> +       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
> +               atomic_set(&dev->counts[i], 0);
> +
> +       dev->sigdata.lock = NULL;
> +
> +       dev->context_flag = 0;
> +       dev->last_context = 0;
> +       dev->if_version = 0;
> +}
> +
> +/**
>   * Take down the DRM device.
>   *
>   * \param dev DRM device structure.
> @@ -209,6 +234,8 @@ int drm_lastclose(struct drm_device * dev)
>         dev->dev_mapping = NULL;
>         mutex_unlock(&dev->struct_mutex);
>
> +       drm_legacy_dev_reinit(dev);
> +
>         DRM_DEBUG("lastclose completed\n");
>         return 0;
>  }
> diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
> index 5679971..59f4592 100644
> --- a/drivers/gpu/drm/drm_fops.c
> +++ b/drivers/gpu/drm/drm_fops.c
> @@ -48,7 +48,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
>
>  static int drm_setup(struct drm_device * dev)
>  {
> -       int i;
>         int ret;
>
>         if (dev->driver->firstopen &&
> @@ -58,32 +57,12 @@ static int drm_setup(struct drm_device * dev)
>                         return ret;
>         }
>
> -       atomic_set(&dev->ioctl_count, 0);
> -       atomic_set(&dev->vma_count, 0);
> -
> -       i = drm_legacy_dma_setup(dev);
> -       if (i < 0)
> -               return i;
> -
> -       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
> -               atomic_set(&dev->counts[i], 0);
> -
> -       dev->sigdata.lock = NULL;
> +       ret = drm_legacy_dma_setup(dev);
> +       if (ret < 0)
> +               return ret;
>
> -       dev->context_flag = 0;
> -       dev->last_context = 0;
> -       dev->if_version = 0;
>
>         DRM_DEBUG("\n");
> -
> -       /*
> -        * The kernel's context could be created here, but is now created
> -        * in drm_dma_enqueue.  This is more resource-efficient for
> -        * hardware that does not do DMA, but may mean that
> -        * drm_select_queue fails between the time the interrupt is
> -        * initialized and the time the queues are initialized.
> -        */
> -
>         return 0;
>  }
>
> --
> 1.8.3.2
>
