[Intel-gfx] [PATCH 08/11] drm/i915: mm_list is per VMA
Ben Widawsky
ben at bwidawsk.net
Wed Jul 10 18:39:23 CEST 2013
On Tue, Jul 09, 2013 at 09:18:46AM +0200, Daniel Vetter wrote:
> On Mon, Jul 08, 2013 at 11:08:39PM -0700, Ben Widawsky wrote:
> > formerly: "drm/i915: Create VMAs (part 5) - move mm_list"
> >
> > The mm_list is used for the active/inactive LRUs. Since those LRUs are
> > per address space, the link should be per VMA.
> >
> > Because we'll only ever have 1 VMA before this point in the series, it's
> > not incorrect to defer the change until now, and doing it here makes the
> > change much easier to understand.
> >
> > v2: only bump GGTT LRU in i915_gem_object_set_to_gtt_domain (Chris)
> >
> > v3: Moved earlier in the series
> >
> > Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
>
> The commit message seems to miss the explanation (that I've written for
> you) why we move only some of the dev_priv->mm lrus, not all of them ...
Yes, this is a mistake. I had copied it into the commit message, prefixed
with "Shamelessly manipulated out of Daniel:", but I'm not sure where it
went. After I address the other issues, I'll decide whether to resubmit the
whole series or just this patch fixed.
Sorry about that; it wasn't ignorance, just incompetence.
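As a reference point for reviewing the mechanical churn below, the pattern
that repeats throughout the diff is just this (a simplified sketch of what is
already in the patch, not a drop-in snippet; it only shows the VMA lookup that
replaces the old direct use of obj->mm_list):

	/* Before this patch: the object itself carried the LRU link, so an
	 * object had exactly one active/inactive position, implicitly in the
	 * global GTT. */
	list_move_tail(&obj->mm_list, &vm->inactive_list);

	/* After this patch: each (object, address space) binding is an
	 * i915_vma and the LRU link lives there, so callers first look up
	 * the VMA for the address space they care about. */
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
	list_move_tail(&vma->mm_list, &vm->inactive_list);
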
> > ---
> > drivers/gpu/drm/i915/i915_debugfs.c | 53 ++++++++++++++++++++++------------
> > drivers/gpu/drm/i915/i915_drv.h | 5 ++--
> > drivers/gpu/drm/i915/i915_gem.c | 34 ++++++++++++++--------
> > drivers/gpu/drm/i915/i915_gem_evict.c | 14 ++++-----
> > drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +-
> > drivers/gpu/drm/i915/i915_irq.c | 37 ++++++++++++++----------
> > 6 files changed, 87 insertions(+), 58 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> > index 867ed07..163ca6b 100644
> > --- a/drivers/gpu/drm/i915/i915_debugfs.c
> > +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> > @@ -157,7 +157,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
> > struct drm_device *dev = node->minor->dev;
> > struct drm_i915_private *dev_priv = dev->dev_private;
> > struct i915_address_space *vm = &dev_priv->gtt.base;
> > - struct drm_i915_gem_object *obj;
> > + struct i915_vma *vma;
> > size_t total_obj_size, total_gtt_size;
> > int count, ret;
> >
> > @@ -165,6 +165,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
> > if (ret)
> > return ret;
> >
> > + /* FIXME: the user of this interface might want more than just GGTT */
> > switch (list) {
> > case ACTIVE_LIST:
> > seq_puts(m, "Active:\n");
> > @@ -180,12 +181,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
> > }
> >
> > total_obj_size = total_gtt_size = count = 0;
> > - list_for_each_entry(obj, head, mm_list) {
> > - seq_puts(m, " ");
> > - describe_obj(m, obj);
> > - seq_putc(m, '\n');
> > - total_obj_size += obj->base.size;
> > - total_gtt_size += i915_gem_obj_ggtt_size(obj);
> > + list_for_each_entry(vma, head, mm_list) {
> > + seq_printf(m, " ");
> > + describe_obj(m, vma->obj);
> > + seq_printf(m, "\n");
> > + total_obj_size += vma->obj->base.size;
> > + total_gtt_size += i915_gem_obj_size(vma->obj, vma->vm);
> > count++;
> > }
> > mutex_unlock(&dev->struct_mutex);
> > @@ -233,7 +234,18 @@ static int per_file_stats(int id, void *ptr, void *data)
> > return 0;
> > }
> >
> > -static int i915_gem_object_info(struct seq_file *m, void *data)
> > +#define count_vmas(list, member) do { \
> > + list_for_each_entry(vma, list, member) { \
> > + size += i915_gem_obj_ggtt_size(vma->obj); \
> > + ++count; \
> > + if (vma->obj->map_and_fenceable) { \
> > + mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
> > + ++mappable_count; \
> > + } \
> > + } \
> > +} while (0)
> > +
> > +static int i915_gem_object_info(struct seq_file *m, void* data)
> > {
> > struct drm_info_node *node = (struct drm_info_node *) m->private;
> > struct drm_device *dev = node->minor->dev;
> > @@ -243,6 +255,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
> > struct drm_i915_gem_object *obj;
> > struct i915_address_space *vm = &dev_priv->gtt.base;
> > struct drm_file *file;
> > + struct i915_vma *vma;
> > int ret;
> >
> > ret = mutex_lock_interruptible(&dev->struct_mutex);
> > @@ -259,12 +272,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
> > count, mappable_count, size, mappable_size);
> >
> > size = count = mappable_size = mappable_count = 0;
> > - count_objects(&vm->active_list, mm_list);
> > + count_vmas(&vm->active_list, mm_list);
> > seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
> > count, mappable_count, size, mappable_size);
> >
> > size = count = mappable_size = mappable_count = 0;
> > - count_objects(&vm->inactive_list, mm_list);
> > + count_vmas(&vm->inactive_list, mm_list);
> > seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
> > count, mappable_count, size, mappable_size);
> >
> > @@ -2037,7 +2050,8 @@ i915_drop_caches_set(void *data, u64 val)
> > struct drm_device *dev = data;
> > struct drm_i915_private *dev_priv = dev->dev_private;
> > struct drm_i915_gem_object *obj, *next;
> > - struct i915_address_space *vm = &dev_priv->gtt.base;
> > + struct i915_address_space *vm;
> > + struct i915_vma *vma, *x;
> > int ret;
> >
> > DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
> > @@ -2058,14 +2072,15 @@ i915_drop_caches_set(void *data, u64 val)
> > i915_gem_retire_requests(dev);
> >
> > if (val & DROP_BOUND) {
> > - list_for_each_entry_safe(obj, next, &vm->inactive_list,
> > - mm_list) {
> > - if (obj->pin_count)
> > - continue;
> > -
> > - ret = i915_gem_object_unbind(obj, &dev_priv->gtt.base);
> > - if (ret)
> > - goto unlock;
> > + list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> > + list_for_each_entry_safe(vma, x, &vm->inactive_list,
> > + mm_list)
> > + if (vma->obj->pin_count == 0) {
> > + ret = i915_gem_object_unbind(vma->obj,
> > + vm);
> > + if (ret)
> > + goto unlock;
> > + }
> > }
> > }
> >
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 48baccc..48105f8 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -541,6 +541,9 @@ struct i915_vma {
> > struct drm_i915_gem_object *obj;
> > struct i915_address_space *vm;
> >
> > + /** This object's place on the active/inactive lists */
> > + struct list_head mm_list;
> > +
> > struct list_head vma_link; /* Link in the object's VMA list */
> > };
> >
> > @@ -1242,9 +1245,7 @@ struct drm_i915_gem_object {
> > struct drm_mm_node *stolen;
> > struct list_head global_list;
> >
> > - /** This object's place on the active/inactive lists */
> > struct list_head ring_list;
> > - struct list_head mm_list;
> > /** This object's place in the batchbuffer or on the eviction list */
> > struct list_head exec_list;
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 501c590..9a58363 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -1888,6 +1888,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
> > struct drm_device *dev = obj->base.dev;
> > struct drm_i915_private *dev_priv = dev->dev_private;
> > u32 seqno = intel_ring_get_seqno(ring);
> > + struct i915_vma *vma;
> >
> > BUG_ON(ring == NULL);
> > obj->ring = ring;
> > @@ -1899,7 +1900,8 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
> > }
> >
> > /* Move from whatever list we were on to the tail of execution. */
> > - list_move_tail(&obj->mm_list, &vm->active_list);
> > + vma = i915_gem_obj_to_vma(obj, vm);
> > + list_move_tail(&vma->mm_list, &vm->active_list);
> > list_move_tail(&obj->ring_list, &ring->active_list);
> >
> > obj->last_read_seqno = seqno;
> > @@ -1922,10 +1924,13 @@ static void
> > i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
> > struct i915_address_space *vm)
> > {
> > + struct i915_vma *vma;
> > +
> > BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
> > BUG_ON(!obj->active);
> >
> > - list_move_tail(&obj->mm_list, &vm->inactive_list);
> > + vma = i915_gem_obj_to_vma(obj, vm);
> > + list_move_tail(&vma->mm_list, &vm->inactive_list);
> >
> > list_del_init(&obj->ring_list);
> > obj->ring = NULL;
> > @@ -2287,9 +2292,9 @@ void i915_gem_restore_fences(struct drm_device *dev)
> > void i915_gem_reset(struct drm_device *dev)
> > {
> > struct drm_i915_private *dev_priv = dev->dev_private;
> > - struct i915_address_space *vm;
> > - struct drm_i915_gem_object *obj;
> > struct intel_ring_buffer *ring;
> > + struct i915_address_space *vm;
> > + struct i915_vma *vma;
> > int i;
> >
> > for_each_ring(ring, dev_priv, i)
> > @@ -2299,8 +2304,8 @@ void i915_gem_reset(struct drm_device *dev)
> > * necessary invalidation upon reuse.
> > */
> > list_for_each_entry(vm, &dev_priv->vm_list, global_link)
> > - list_for_each_entry(obj, &vm->inactive_list, mm_list)
> > - obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
> > + list_for_each_entry(vma, &vm->inactive_list, mm_list)
> > + vma->obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
> >
> > i915_gem_restore_fences(dev);
> > }
> > @@ -2644,12 +2649,12 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj,
> > i915_gem_gtt_finish_object(obj);
> > i915_gem_object_unpin_pages(obj);
> >
> > - list_del(&obj->mm_list);
> > /* Avoid an unnecessary call to unbind on rebind. */
> > if (i915_is_ggtt(vm))
> > obj->map_and_fenceable = true;
> >
> > vma = i915_gem_obj_to_vma(obj, vm);
> > + list_del(&vma->mm_list);
> > list_del(&vma->vma_link);
> > drm_mm_remove_node(&vma->node);
> > i915_gem_vma_destroy(vma);
> > @@ -3197,7 +3202,7 @@ search_free:
> > }
> >
> > list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> > - list_add_tail(&obj->mm_list, &vm->inactive_list);
> > + list_add_tail(&vma->mm_list, &vm->inactive_list);
> >
> > /* Keep GGTT vmas first to make debug easier */
> > if (i915_is_ggtt(vm))
> > @@ -3354,9 +3359,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
> > old_write_domain);
> >
> > /* And bump the LRU for this access */
> > - if (i915_gem_object_is_inactive(obj))
> > - list_move_tail(&obj->mm_list,
> > - &dev_priv->gtt.base.inactive_list);
> > + if (i915_gem_object_is_inactive(obj)) {
> > + struct i915_vma *vma = i915_gem_obj_to_vma(obj,
> > + &dev_priv->gtt.base);
> > + if (vma)
> > + list_move_tail(&vma->mm_list,
> > + &dev_priv->gtt.base.inactive_list);
> > +
> > + }
> >
> > return 0;
> > }
> > @@ -3931,7 +3941,6 @@ unlock:
> > void i915_gem_object_init(struct drm_i915_gem_object *obj,
> > const struct drm_i915_gem_object_ops *ops)
> > {
> > - INIT_LIST_HEAD(&obj->mm_list);
> > INIT_LIST_HEAD(&obj->global_list);
> > INIT_LIST_HEAD(&obj->ring_list);
> > INIT_LIST_HEAD(&obj->exec_list);
> > @@ -4071,6 +4080,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
> > return ERR_PTR(-ENOMEM);
> >
> > INIT_LIST_HEAD(&vma->vma_link);
> > + INIT_LIST_HEAD(&vma->mm_list);
> > vma->vm = vm;
> > vma->obj = obj;
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> > index 32efdc0..18a44a9 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> > @@ -87,8 +87,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
> > drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
> >
> > /* First see if there is a large enough contiguous idle region... */
> > - list_for_each_entry(obj, &vm->inactive_list, mm_list) {
> > - struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
> > + list_for_each_entry(vma, &vm->inactive_list, mm_list) {
> > if (mark_free(vma, &unwind_list))
> > goto found;
> > }
> > @@ -97,8 +96,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
> > goto none;
> >
> > /* Now merge in the soon-to-be-expired objects... */
> > - list_for_each_entry(obj, &vm->active_list, mm_list) {
> > - struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
> > + list_for_each_entry(vma, &vm->active_list, mm_list) {
> > if (mark_free(vma, &unwind_list))
> > goto found;
> > }
> > @@ -159,7 +157,7 @@ i915_gem_evict_everything(struct drm_device *dev)
> > {
> > drm_i915_private_t *dev_priv = dev->dev_private;
> > struct i915_address_space *vm;
> > - struct drm_i915_gem_object *obj, *next;
> > + struct i915_vma *vma, *next;
> > bool lists_empty = true;
> > int ret;
> >
> > @@ -187,9 +185,9 @@ i915_gem_evict_everything(struct drm_device *dev)
> >
> > /* Having flushed everything, unbind() should never raise an error */
> > list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> > - list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
> > - if (obj->pin_count == 0)
> > - WARN_ON(i915_gem_object_unbind(obj, vm));
> > + list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
> > + if (vma->obj->pin_count == 0)
> > + WARN_ON(i915_gem_object_unbind(vma->obj, vm));
> > }
> >
> > return 0;
> > diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > index bfe61fa..58b2613 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > @@ -415,7 +415,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> > obj->has_global_gtt_mapping = 1;
> >
> > list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> > - list_add_tail(&obj->mm_list, &vm->inactive_list);
> > + list_add_tail(&vma->mm_list, &dev_priv->gtt.base.inactive_list);
> >
> > return obj;
> >
> > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> > index 28fa0ff..e065232 100644
> > --- a/drivers/gpu/drm/i915/i915_irq.c
> > +++ b/drivers/gpu/drm/i915/i915_irq.c
> > @@ -1640,11 +1640,11 @@ static void capture_bo(struct drm_i915_error_buffer *err,
> > static u32 capture_active_bo(struct drm_i915_error_buffer *err,
> > int count, struct list_head *head)
> > {
> > - struct drm_i915_gem_object *obj;
> > + struct i915_vma *vma;
> > int i = 0;
> >
> > - list_for_each_entry(obj, head, mm_list) {
> > - capture_bo(err++, obj);
> > + list_for_each_entry(vma, head, mm_list) {
> > + capture_bo(err++, vma->obj);
> > if (++i == count)
> > break;
> > }
> > @@ -1706,8 +1706,9 @@ static struct drm_i915_error_object *
> > i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
> > struct intel_ring_buffer *ring)
> > {
> > + struct i915_address_space *vm;
> > + struct i915_vma *vma;
> > struct drm_i915_gem_object *obj;
> > - struct i915_address_space *vm = &dev_priv->gtt.base;
> > u32 seqno;
> >
> > if (!ring->get_seqno)
> > @@ -1729,20 +1730,23 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
> > }
> >
> > seqno = ring->get_seqno(ring, false);
> > - list_for_each_entry(obj, &vm->active_list, mm_list) {
> > - if (obj->ring != ring)
> > - continue;
> > + list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> > + list_for_each_entry(vma, &vm->active_list, mm_list) {
> > + obj = vma->obj;
> > + if (obj->ring != ring)
> > + continue;
> >
> > - if (i915_seqno_passed(seqno, obj->last_read_seqno))
> > - continue;
> > + if (i915_seqno_passed(seqno, obj->last_read_seqno))
> > + continue;
> >
> > - if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
> > - continue;
> > + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
> > + continue;
> >
> > - /* We need to copy these to an anonymous buffer as the simplest
> > - * method to avoid being overwritten by userspace.
> > - */
> > - return i915_error_object_create(dev_priv, obj);
> > + /* We need to copy these to an anonymous buffer as the simplest
> > + * method to avoid being overwritten by userspace.
> > + */
> > + return i915_error_object_create(dev_priv, obj);
> > + }
> > }
> >
> > return NULL;
> > @@ -1863,11 +1867,12 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
> > struct drm_i915_error_state *error)
> > {
> > struct drm_i915_gem_object *obj;
> > + struct i915_vma *vma;
> > struct i915_address_space *vm = &dev_priv->gtt.base;
> > int i;
> >
> > i = 0;
> > - list_for_each_entry(obj, &vm->active_list, mm_list)
> > + list_for_each_entry(vma, &vm->active_list, mm_list)
> > i++;
> > error->active_bo_count = i;
> > list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
> > --
> > 1.8.3.2
> >
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx at lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
--
Ben Widawsky, Intel Open Source Technology Center