[Intel-gfx] [PATCH 02/12] drm/i915: Fix up map and fenceable for VMA
Daniel Vetter
daniel at ffwll.ch
Tue Jul 23 18:42:35 CEST 2013
On Sun, Jul 21, 2013 at 07:08:09PM -0700, Ben Widawsky wrote:
> formerly: "drm/i915: Create VMAs (part 3.5) - map and fenceable
> tracking"
>
> The map_and_fenceable tracking is per object. GTT mapping and fences
> only apply to the global GTT. As such, object operations which are not
> performed on the global GTT should not affect mappable or fenceable
> characteristics.
>
> Functionally, this commit could very well be squashed into the previous
> patch which updated object operations to take a VM argument. This
> commit is split out because it's a bit tricky (or at least it was for
> me).
>
> Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
On a quick read there seems to be a lot of stuff in here that belongs in
other patches, like the error handling and debugfs changes. And since you
claim the mappable/fenceable stuff is tricky, it doesn't really stick out
that much in between all the other changes ...
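
To spell out the rule the commit message states, in case it helps other
readers: map_and_fenceable is a per-object flag, but fences and the
CPU-visible aperture only exist in the global GTT, so a bind or unbind in
a PPGTT must leave the flag alone. A hypothetical helper (not part of the
patch, untested, just to illustrate the invariant) would look roughly like
this:

        /* Illustrative only: keep the per-object flag in sync, but only
         * when the address space is the global GTT. PPGTT bind/unbind
         * paths must not touch it.
         */
        static void update_map_and_fenceable(struct drm_i915_gem_object *obj,
                                             struct i915_address_space *vm,
                                             bool mappable, bool fenceable)
        {
                if (!i915_is_ggtt(vm))
                        return;

                obj->map_and_fenceable = mappable && fenceable;
        }
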
> ---
> drivers/gpu/drm/i915/i915_debugfs.c | 53 ++++++++++++++++++++++------------
> drivers/gpu/drm/i915/i915_drv.h | 5 ++--
> drivers/gpu/drm/i915/i915_gem.c | 43 +++++++++++++++++----------
> drivers/gpu/drm/i915/i915_gem_evict.c | 14 ++++-----
> drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +-
> drivers/gpu/drm/i915/i915_gpu_error.c | 37 ++++++++++++++----------
> 6 files changed, 93 insertions(+), 61 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index f8e590f..0b7df6c 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -144,7 +144,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
> struct drm_device *dev = node->minor->dev;
> struct drm_i915_private *dev_priv = dev->dev_private;
> struct i915_address_space *vm = &dev_priv->gtt.base;
> - struct drm_i915_gem_object *obj;
> + struct i915_vma *vma;
> size_t total_obj_size, total_gtt_size;
> int count, ret;
>
> @@ -152,6 +152,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
> if (ret)
> return ret;
>
> + /* FIXME: the user of this interface might want more than just GGTT */
> switch (list) {
> case ACTIVE_LIST:
> seq_puts(m, "Active:\n");
> @@ -167,12 +168,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
> }
>
> total_obj_size = total_gtt_size = count = 0;
> - list_for_each_entry(obj, head, mm_list) {
> - seq_puts(m, " ");
> - describe_obj(m, obj);
> - seq_putc(m, '\n');
> - total_obj_size += obj->base.size;
> - total_gtt_size += i915_gem_obj_ggtt_size(obj);
> + list_for_each_entry(vma, head, mm_list) {
> + seq_printf(m, " ");
> + describe_obj(m, vma->obj);
> + seq_printf(m, "\n");
> + total_obj_size += vma->obj->base.size;
> + total_gtt_size += i915_gem_obj_size(vma->obj, vma->vm);
> count++;
> }
> mutex_unlock(&dev->struct_mutex);
> @@ -220,7 +221,18 @@ static int per_file_stats(int id, void *ptr, void *data)
> return 0;
> }
>
> -static int i915_gem_object_info(struct seq_file *m, void *data)
> +#define count_vmas(list, member) do { \
> + list_for_each_entry(vma, list, member) { \
> + size += i915_gem_obj_ggtt_size(vma->obj); \
> + ++count; \
> + if (vma->obj->map_and_fenceable) { \
> + mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
> + ++mappable_count; \
> + } \
> + } \
> +} while (0)
> +
> +static int i915_gem_object_info(struct seq_file *m, void* data)
> {
> struct drm_info_node *node = (struct drm_info_node *) m->private;
> struct drm_device *dev = node->minor->dev;
> @@ -230,6 +242,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
> struct drm_i915_gem_object *obj;
> struct i915_address_space *vm = &dev_priv->gtt.base;
> struct drm_file *file;
> + struct i915_vma *vma;
> int ret;
>
> ret = mutex_lock_interruptible(&dev->struct_mutex);
> @@ -249,12 +262,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
> count, mappable_count, size, mappable_size);
>
> size = count = mappable_size = mappable_count = 0;
> - count_objects(&vm->active_list, mm_list);
> + count_vmas(&vm->active_list, mm_list);
> seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
> count, mappable_count, size, mappable_size);
>
> size = count = mappable_size = mappable_count = 0;
> - count_objects(&vm->inactive_list, mm_list);
> + count_vmas(&vm->inactive_list, mm_list);
> seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
> count, mappable_count, size, mappable_size);
>
> @@ -1767,7 +1780,8 @@ i915_drop_caches_set(void *data, u64 val)
> struct drm_device *dev = data;
> struct drm_i915_private *dev_priv = dev->dev_private;
> struct drm_i915_gem_object *obj, *next;
> - struct i915_address_space *vm = &dev_priv->gtt.base;
> + struct i915_address_space *vm;
> + struct i915_vma *vma, *x;
> int ret;
>
> DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
> @@ -1788,14 +1802,15 @@ i915_drop_caches_set(void *data, u64 val)
> i915_gem_retire_requests(dev);
>
> if (val & DROP_BOUND) {
> - list_for_each_entry_safe(obj, next, &vm->inactive_list,
> - mm_list) {
> - if (obj->pin_count)
> - continue;
> -
> - ret = i915_gem_object_unbind(obj, &dev_priv->gtt.base);
> - if (ret)
> - goto unlock;
> + list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> + list_for_each_entry_safe(vma, x, &vm->inactive_list,
> + mm_list)
> + if (vma->obj->pin_count == 0) {
> + ret = i915_gem_object_unbind(vma->obj,
> + vm);
> + if (ret)
> + goto unlock;
> + }
> }
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 681cb41..b208c30 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -541,6 +541,9 @@ struct i915_vma {
> struct drm_i915_gem_object *obj;
> struct i915_address_space *vm;
>
> + /** This object's place on the active/inactive lists */
> + struct list_head mm_list;
> +
> struct list_head vma_link; /* Link in the object's VMA list */
> };
>
> @@ -1258,9 +1261,7 @@ struct drm_i915_gem_object {
> struct drm_mm_node *stolen;
> struct list_head global_list;
>
> - /** This object's place on the active/inactive lists */
> struct list_head ring_list;
> - struct list_head mm_list;
> /** This object's place in the batchbuffer or on the eviction list */
> struct list_head exec_list;
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 0111554..6bdf89d 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1874,6 +1874,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
> struct drm_device *dev = obj->base.dev;
> struct drm_i915_private *dev_priv = dev->dev_private;
> u32 seqno = intel_ring_get_seqno(ring);
> + struct i915_vma *vma;
>
> BUG_ON(ring == NULL);
> if (obj->ring != ring && obj->last_write_seqno) {
> @@ -1889,7 +1890,8 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
> }
>
> /* Move from whatever list we were on to the tail of execution. */
> - list_move_tail(&obj->mm_list, &vm->active_list);
> + vma = i915_gem_obj_to_vma(obj, vm);
> + list_move_tail(&vma->mm_list, &vm->active_list);
> list_move_tail(&obj->ring_list, &ring->active_list);
>
> obj->last_read_seqno = seqno;
> @@ -1912,10 +1914,13 @@ static void
> i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
> struct i915_address_space *vm)
> {
> + struct i915_vma *vma;
> +
> BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
> BUG_ON(!obj->active);
>
> - list_move_tail(&obj->mm_list, &vm->inactive_list);
> + vma = i915_gem_obj_to_vma(obj, vm);
> + list_move_tail(&vma->mm_list, &vm->inactive_list);
>
> list_del_init(&obj->ring_list);
> obj->ring = NULL;
> @@ -2285,9 +2290,9 @@ void i915_gem_restore_fences(struct drm_device *dev)
> void i915_gem_reset(struct drm_device *dev)
> {
> struct drm_i915_private *dev_priv = dev->dev_private;
> - struct i915_address_space *vm;
> - struct drm_i915_gem_object *obj;
> struct intel_ring_buffer *ring;
> + struct i915_address_space *vm;
> + struct i915_vma *vma;
> int i;
>
> for_each_ring(ring, dev_priv, i)
> @@ -2297,8 +2302,8 @@ void i915_gem_reset(struct drm_device *dev)
> * necessary invalidation upon reuse.
> */
> list_for_each_entry(vm, &dev_priv->vm_list, global_link)
> - list_for_each_entry(obj, &vm->inactive_list, mm_list)
> - obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
> + list_for_each_entry(vma, &vm->inactive_list, mm_list)
> + vma->obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
>
> i915_gem_restore_fences(dev);
> }
> @@ -2633,7 +2638,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj,
>
> trace_i915_gem_object_unbind(obj, vm);
>
> - if (obj->has_global_gtt_mapping)
> + if (obj->has_global_gtt_mapping && i915_is_ggtt(vm))
> i915_gem_gtt_unbind_object(obj);
> if (obj->has_aliasing_ppgtt_mapping) {
> i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
> @@ -2642,11 +2647,12 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj,
> i915_gem_gtt_finish_object(obj);
> i915_gem_object_unpin_pages(obj);
>
> - list_del(&obj->mm_list);
> /* Avoid an unnecessary call to unbind on rebind. */
> - obj->map_and_fenceable = true;
> + if (i915_is_ggtt(vm))
> + obj->map_and_fenceable = true;
>
> vma = i915_gem_obj_to_vma(obj, vm);
> + list_del(&vma->mm_list);
> list_del(&vma->vma_link);
> drm_mm_remove_node(&vma->node);
> i915_gem_vma_destroy(vma);
> @@ -3171,7 +3177,7 @@ search_free:
> goto err_out;
>
> list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> - list_add_tail(&obj->mm_list, &vm->inactive_list);
> + list_add_tail(&vma->mm_list, &vm->inactive_list);
>
> /* Keep GGTT vmas first to make debug easier */
> if (i915_is_ggtt(vm))
> @@ -3188,7 +3194,9 @@ search_free:
> i915_is_ggtt(vm) &&
> vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
>
> - obj->map_and_fenceable = mappable && fenceable;
> + /* Map and fenceable only changes if the VM is the global GGTT */
> + if (i915_is_ggtt(vm))
> + obj->map_and_fenceable = mappable && fenceable;
>
> trace_i915_gem_object_bind(obj, vm, map_and_fenceable);
> i915_gem_verify_gtt(dev);
> @@ -3332,9 +3340,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
> old_write_domain);
>
> /* And bump the LRU for this access */
> - if (i915_gem_object_is_inactive(obj))
> - list_move_tail(&obj->mm_list,
> - &dev_priv->gtt.base.inactive_list);
> + if (i915_gem_object_is_inactive(obj)) {
> + struct i915_vma *vma = i915_gem_obj_to_vma(obj,
> + &dev_priv->gtt.base);
> + if (vma)
> + list_move_tail(&vma->mm_list,
> + &dev_priv->gtt.base.inactive_list);
> +
> + }
>
> return 0;
> }
> @@ -3906,7 +3919,6 @@ unlock:
> void i915_gem_object_init(struct drm_i915_gem_object *obj,
> const struct drm_i915_gem_object_ops *ops)
> {
> - INIT_LIST_HEAD(&obj->mm_list);
> INIT_LIST_HEAD(&obj->global_list);
> INIT_LIST_HEAD(&obj->ring_list);
> INIT_LIST_HEAD(&obj->exec_list);
> @@ -4043,6 +4055,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
> return ERR_PTR(-ENOMEM);
>
> INIT_LIST_HEAD(&vma->vma_link);
> + INIT_LIST_HEAD(&vma->mm_list);
> vma->vm = vm;
> vma->obj = obj;
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 32efdc0..18a44a9 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -87,8 +87,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
> drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
>
> /* First see if there is a large enough contiguous idle region... */
> - list_for_each_entry(obj, &vm->inactive_list, mm_list) {
> - struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
> + list_for_each_entry(vma, &vm->inactive_list, mm_list) {
> if (mark_free(vma, &unwind_list))
> goto found;
> }
> @@ -97,8 +96,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
> goto none;
>
> /* Now merge in the soon-to-be-expired objects... */
> - list_for_each_entry(obj, &vm->active_list, mm_list) {
> - struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
> + list_for_each_entry(vma, &vm->active_list, mm_list) {
> if (mark_free(vma, &unwind_list))
> goto found;
> }
> @@ -159,7 +157,7 @@ i915_gem_evict_everything(struct drm_device *dev)
> {
> drm_i915_private_t *dev_priv = dev->dev_private;
> struct i915_address_space *vm;
> - struct drm_i915_gem_object *obj, *next;
> + struct i915_vma *vma, *next;
> bool lists_empty = true;
> int ret;
>
> @@ -187,9 +185,9 @@ i915_gem_evict_everything(struct drm_device *dev)
>
> /* Having flushed everything, unbind() should never raise an error */
> list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> - list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
> - if (obj->pin_count == 0)
> - WARN_ON(i915_gem_object_unbind(obj, vm));
> + list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
> + if (vma->obj->pin_count == 0)
> + WARN_ON(i915_gem_object_unbind(vma->obj, vm));
> }
>
> return 0;
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 000ffbd..fa60103 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -419,7 +419,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> obj->has_global_gtt_mapping = 1;
>
> list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> - list_add_tail(&obj->mm_list, &ggtt->inactive_list);
> + list_add_tail(&vma->mm_list, &ggtt->inactive_list);
>
> return obj;
>
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index d970d84..9623a4e 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -556,11 +556,11 @@ static void capture_bo(struct drm_i915_error_buffer *err,
> static u32 capture_active_bo(struct drm_i915_error_buffer *err,
> int count, struct list_head *head)
> {
> - struct drm_i915_gem_object *obj;
> + struct i915_vma *vma;
> int i = 0;
>
> - list_for_each_entry(obj, head, mm_list) {
> - capture_bo(err++, obj);
> + list_for_each_entry(vma, head, mm_list) {
> + capture_bo(err++, vma->obj);
> if (++i == count)
> break;
> }
> @@ -622,7 +622,8 @@ static struct drm_i915_error_object *
> i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
> struct intel_ring_buffer *ring)
> {
> - struct i915_address_space *vm = &dev_priv->gtt.base;
> + struct i915_address_space *vm;
> + struct i915_vma *vma;
> struct drm_i915_gem_object *obj;
> u32 seqno;
>
> @@ -642,20 +643,23 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
> }
>
> seqno = ring->get_seqno(ring, false);
> - list_for_each_entry(obj, &vm->active_list, mm_list) {
> - if (obj->ring != ring)
> - continue;
> + list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> + list_for_each_entry(vma, &vm->active_list, mm_list) {
> + obj = vma->obj;
> + if (obj->ring != ring)
> + continue;
>
> - if (i915_seqno_passed(seqno, obj->last_read_seqno))
> - continue;
> + if (i915_seqno_passed(seqno, obj->last_read_seqno))
> + continue;
>
> - if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
> - continue;
> + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
> + continue;
>
> - /* We need to copy these to an anonymous buffer as the simplest
> - * method to avoid being overwritten by userspace.
> - */
> - return i915_error_object_create(dev_priv, obj);
> + /* We need to copy these to an anonymous buffer as the simplest
> + * method to avoid being overwritten by userspace.
> + */
> + return i915_error_object_create(dev_priv, obj);
> + }
> }
>
> return NULL;
> @@ -775,11 +779,12 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
> struct drm_i915_error_state *error)
> {
> struct i915_address_space *vm = &dev_priv->gtt.base;
> + struct i915_vma *vma;
> struct drm_i915_gem_object *obj;
> int i;
>
> i = 0;
> - list_for_each_entry(obj, &vm->active_list, mm_list)
> + list_for_each_entry(vma, &vm->active_list, mm_list)
> i++;
> error->active_bo_count = i;
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
> --
> 1.8.3.3
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
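
Aside, mostly as a summary for anyone skimming the diff: the mechanical
part of the conversion is that the active/inactive list node moves from
the object into the VMA, so every walk of vm->active_list or
vm->inactive_list now iterates struct i915_vma and reaches the buffer
through vma->obj. The recurring pattern (sketch only, not a complete
function) is:

        struct i915_vma *vma;

        list_for_each_entry(vma, &vm->inactive_list, mm_list) {
                struct drm_i915_gem_object *obj = vma->obj;

                /* per-object work as before, now scoped to this VM */
        }
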
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch