[Intel-gfx] [PATCH 03/11] drm/i915: Prepare for non-object vma

Joonas Lahtinen joonas.lahtinen at linux.intel.com
Tue Jun 5 09:21:29 UTC 2018


Quoting Chris Wilson (2018-06-05 10:19:41)
> In order to allow ourselves to use VMA to wrap other entities other than
> GEM objects, we need to allow for the vma->obj backpointer to be NULL.
> In most cases, we know we are operating on a GEM object and its vma, but
> we need the core code (such as i915_vma_pin/insert/bind/unbind) to work
> regardless of the innards.
> 
> The remaining eyesore here is vma->obj->cache_level and related (but
> less of an issue) vma->obj->gt_ro. With a bit of care we should mirror
> those on the vma itself.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala at linux.intel.com>
> Cc: Matthew Auld <matthew.william.auld at gmail.com>
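
On the cache_level/gt_ro mirroring: agreed. As a rough sketch (field
placement and names hypothetical, untested), the vma could carry copies
set at creation time so the core code never chases a possibly NULL
vma->obj:

	struct i915_vma {
		...
		/* Sketch only: mirrored from the object when the vma is
		 * created, so i915_vma_pin/insert/bind/unbind never have
		 * to dereference a possibly NULL vma->obj.
		 */
		unsigned int cache_level;
		bool gt_ro;
		...
	};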

> @@ -1569,11 +1572,11 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
>         int count_inactive, count_active;
>  
>         count_inactive = 0;
> -       list_for_each_entry(vma, &vm->active_list, vm_link)
> +       list_for_each_entry(vma, &vm->inactive_list, vm_link)
>                 count_inactive++;
>  
>         count_active = 0;
> -       list_for_each_entry(vma, &vm->inactive_list, vm_link)
> +       list_for_each_entry(vma, &vm->active_list, vm_link)
>                 count_active++;

Pretty sure this should go in as a separate bugfix; the swapped
active/inactive counting is independent of the non-object vma work...

> @@ -586,23 +592,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
>                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
>         }
>         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> -       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
> +       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
>  
>         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
>  
> -       spin_lock(&dev_priv->mm.obj_lock);
> -       list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
> -       obj->bind_count++;
> -       spin_unlock(&dev_priv->mm.obj_lock);
> +       if (vma->obj) {
> +               struct drm_i915_gem_object *obj = vma->obj;
> +
> +               spin_lock(&dev_priv->mm.obj_lock);
> +               list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
> +               obj->bind_count++;
> +               spin_unlock(&dev_priv->mm.obj_lock);
>  
> -       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
> +               GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

checkpatch will yell about the long line. The block could even warrant
a __ helper function.
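
Something like this, perhaps (untested sketch, helper name made up):

	static void __i915_vma_bind_obj(struct i915_vma *vma)
	{
		struct drm_i915_gem_object *obj = vma->obj;
		struct drm_i915_private *i915 = vma->vm->i915;

		if (!obj)
			return;

		/* Track the object on the bound list for the shrinker. */
		spin_lock(&i915->mm.obj_lock);
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
		obj->bind_count++;
		spin_unlock(&i915->mm.obj_lock);

		GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) <
			   obj->bind_count);
	}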

> +       }
>  
>         return 0;
>  
>  err_clear:
>         vma->vm->clear_pages(vma);
>  err_unpin:
> -       i915_gem_object_unpin_pages(obj);
> +       if (vma->obj)
> +               i915_gem_object_unpin_pages(vma->obj);
>         return ret;
>  }
>  
> @@ -610,7 +621,6 @@ static void
>  i915_vma_remove(struct i915_vma *vma)
>  {
>         struct drm_i915_private *i915 = vma->vm->i915;
> -       struct drm_i915_gem_object *obj = vma->obj;
>  
>         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
>         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
> @@ -620,20 +630,26 @@ i915_vma_remove(struct i915_vma *vma)
>         drm_mm_remove_node(&vma->node);
>         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
>  
> -       /* Since the unbound list is global, only move to that list if
> +       /*
> +        * Since the unbound list is global, only move to that list if
>          * no more VMAs exist.
>          */
> -       spin_lock(&i915->mm.obj_lock);
> -       if (--obj->bind_count == 0)
> -               list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
> -       spin_unlock(&i915->mm.obj_lock);
> -
> -       /* And finally now the object is completely decoupled from this vma,
> -        * we can drop its hold on the backing storage and allow it to be
> -        * reaped by the shrinker.
> -        */
> -       i915_gem_object_unpin_pages(obj);
> -       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
> +       if (vma->obj) {
> +               struct drm_i915_gem_object *obj = vma->obj;
> +
> +               spin_lock(&i915->mm.obj_lock);
> +               if (--obj->bind_count == 0)
> +                       list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
> +               spin_unlock(&i915->mm.obj_lock);
> +
> +               /*
> +                * And finally now the object is completely decoupled from this
> +                * vma, we can drop its hold on the backing storage and allow
> +                * it to be reaped by the shrinker.
> +                */
> +               i915_gem_object_unpin_pages(obj);
> +               GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

Right, two helpers it is.
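
The unbind side could then mirror it (again an untested sketch, name
made up):

	static void __i915_vma_unbind_obj(struct i915_vma *vma)
	{
		struct drm_i915_gem_object *obj = vma->obj;
		struct drm_i915_private *i915 = vma->vm->i915;

		if (!obj)
			return;

		/* Since the unbound list is global, only move to that
		 * list if no more VMAs exist.
		 */
		spin_lock(&i915->mm.obj_lock);
		if (--obj->bind_count == 0)
			list_move_tail(&obj->mm.link,
				       &i915->mm.unbound_list);
		spin_unlock(&i915->mm.obj_lock);

		/* The object is now decoupled from this vma, so drop
		 * its hold on the backing storage and allow it to be
		 * reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) <
			   obj->bind_count);
	}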

> +++ b/drivers/gpu/drm/i915/i915_vma.h
> @@ -407,7 +407,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
>  static inline void
>  i915_vma_unpin_fence(struct i915_vma *vma)
>  {
> -       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
> +       /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */

Umm? If the obj-based assert can't stay, why is the vm-based
replacement commented out instead of enabled?
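
If the locking rule still holds for non-object vmas, I'd expect the
assert to simply switch over rather than be commented out, i.e.:

	static inline void
	i915_vma_unpin_fence(struct i915_vma *vma)
	{
		/* vma->vm->i915 is valid even without a GEM object. */
		lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
		...
	}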

Regards, Joonas

