[Intel-gfx] [PATCH 06/15] drm/i915: Split vma exec_link/evict_link

Mika Kuoppala mika.kuoppala at linux.intel.com
Fri Feb 24 12:20:10 UTC 2017


Chris Wilson <chris at chris-wilson.co.uk> writes:

> Currently the vma has a single link member that is used both for holding
> its place in the execbuf reservation list and for holding its place on any
> eviction list. This dual use is quite tricky and error-prone.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

Reviewed-by: Mika Kuoppala <mika.kuoppala at intel.com>
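
As a side note for readers less familiar with the intrusive-list pattern the
patch relies on: with a single shared link member, a vma cannot sit on the
execbuf reservation list and on an eviction list at the same time, and reusing
the link for the second list while it is still threaded onto the first corrupts
that first list. Below is a minimal userspace sketch of the two-link approach;
it is only an analogue of <linux/list.h> (own tiny list helpers, hypothetical
names), not kernel code.

/*
 * Userspace analogue (not the kernel's <linux/list.h>) of the intrusive
 * list pattern: the object carries one dedicated link member per list it
 * can be on, so membership in one list never disturbs the other.
 */
#include <stdio.h>
#include <stddef.h>

struct link {
	struct link *prev, *next;
};

static void list_init(struct link *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct link *entry, struct link *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Each "vma" has a separate link for each list it may sit on. */
struct vma {
	int id;
	struct link exec_link;   /* place in the execbuf reservation list */
	struct link evict_link;  /* place in an eviction list */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct link exec_list, evict_list;
	struct vma v = { .id = 1 };
	struct link *it;

	list_init(&exec_list);
	list_init(&evict_list);

	/* The same vma can be on both lists at once... */
	list_add_tail(&v.exec_link, &exec_list);
	list_add_tail(&v.evict_link, &evict_list);

	/* ...and walking one list is independent of the other. */
	for (it = exec_list.next; it != &exec_list; it = it->next)
		printf("exec:  vma %d\n", container_of(it, struct vma, exec_link)->id);
	for (it = evict_list.next; it != &evict_list; it = it->next)
		printf("evict: vma %d\n", container_of(it, struct vma, evict_link)->id);

	return 0;
}

With separate members, the eviction paths in i915_gem_evict.c can thread vmas
onto their local eviction_list via evict_link without caring whether execbuf
still has the same vmas on eb->vmas via exec_link.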

> ---
>  drivers/gpu/drm/i915/i915_gem_evict.c      | 14 ++++++-------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 32 +++++++++++++++---------------
>  drivers/gpu/drm/i915/i915_vma.h            |  7 +++++--
>  3 files changed, 28 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 4753c3f46f7e..2a6eb2ceff79 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -62,7 +62,7 @@ mark_free(struct drm_mm_scan *scan,
>  	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
>  		return false;
>  
> -	list_add(&vma->exec_list, unwind);
> +	list_add(&vma->evict_link, unwind);
>  	return drm_mm_scan_add_block(scan, &vma->node);
>  }
>  
> @@ -154,7 +154,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
>  	} while (*++phase);
>  
>  	/* Nothing found, clean up and bail out! */
> -	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
> +	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
>  		ret = drm_mm_scan_remove_block(&scan, &vma->node);
>  		BUG_ON(ret);
>  	}
> @@ -201,16 +201,16 @@ i915_gem_evict_something(struct i915_address_space *vm,
>  	 * calling unbind (which may remove the active reference
>  	 * of any of our objects, thus corrupting the list).
>  	 */
> -	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
> +	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
>  		if (drm_mm_scan_remove_block(&scan, &vma->node))
>  			__i915_vma_pin(vma);
>  		else
> -			list_del(&vma->exec_list);
> +			list_del(&vma->evict_link);
>  	}
>  
>  	/* Unbinding will emit any required flushes */
>  	ret = 0;
> -	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
> +	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
>  		__i915_vma_unpin(vma);
>  		if (ret == 0)
>  			ret = i915_vma_unbind(vma);
> @@ -323,10 +323,10 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
>  		 * reference) another in our eviction list.
>  		 */
>  		__i915_vma_pin(vma);
> -		list_add(&vma->exec_list, &eviction_list);
> +		list_add(&vma->evict_link, &eviction_list);
>  	}
>  
> -	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
> +	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
>  		__i915_vma_unpin(vma);
>  		if (ret == 0)
>  			ret = i915_vma_unbind(vma);
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 9c1dacabe7ef..c229d69b8757 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -134,7 +134,7 @@ eb_reset(struct i915_execbuffer *eb)
>  {
>  	struct i915_vma *vma;
>  
> -	list_for_each_entry(vma, &eb->vmas, exec_list) {
> +	list_for_each_entry(vma, &eb->vmas, exec_link) {
>  		eb_unreserve_vma(vma);
>  		i915_vma_put(vma);
>  		vma->exec_entry = NULL;
> @@ -147,7 +147,7 @@ eb_reset(struct i915_execbuffer *eb)
>  static struct i915_vma *
>  eb_get_batch(struct i915_execbuffer *eb)
>  {
> -	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
> +	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_link);
>  
>  	/*
>  	 * SNA is doing fancy tricks with compressing batch buffers, which leads
> @@ -224,7 +224,7 @@ eb_lookup_vmas(struct i915_execbuffer *eb)
>  		}
>  
>  		/* Transfer ownership from the objects list to the vmas list. */
> -		list_add_tail(&vma->exec_list, &eb->vmas);
> +		list_add_tail(&vma->exec_link, &eb->vmas);
>  		list_del_init(&obj->obj_exec_link);
>  
>  		vma->exec_entry = &eb->exec[i];
> @@ -283,7 +283,7 @@ static void eb_destroy(struct i915_execbuffer *eb)
>  {
>  	struct i915_vma *vma;
>  
> -	list_for_each_entry(vma, &eb->vmas, exec_list) {
> +	list_for_each_entry(vma, &eb->vmas, exec_link) {
>  		if (!vma->exec_entry)
>  			continue;
>  
> @@ -748,7 +748,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
>  	struct i915_vma *vma;
>  	int ret = 0;
>  
> -	list_for_each_entry(vma, &eb->vmas, exec_list) {
> +	list_for_each_entry(vma, &eb->vmas, exec_link) {
>  		ret = eb_relocate_vma(vma, eb);
>  		if (ret)
>  			break;
> @@ -900,7 +900,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
>  		struct drm_i915_gem_exec_object2 *entry;
>  		bool need_fence, need_mappable;
>  
> -		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
> +		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_link);
>  		obj = vma->obj;
>  		entry = vma->exec_entry;
>  
> @@ -915,12 +915,12 @@ static int eb_reserve(struct i915_execbuffer *eb)
>  		need_mappable = need_fence || need_reloc_mappable(vma);
>  
>  		if (entry->flags & EXEC_OBJECT_PINNED)
> -			list_move_tail(&vma->exec_list, &pinned_vmas);
> +			list_move_tail(&vma->exec_link, &pinned_vmas);
>  		else if (need_mappable) {
>  			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
> -			list_move(&vma->exec_list, &ordered_vmas);
> +			list_move(&vma->exec_link, &ordered_vmas);
>  		} else
> -			list_move_tail(&vma->exec_list, &ordered_vmas);
> +			list_move_tail(&vma->exec_link, &ordered_vmas);
>  
>  		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
>  		obj->base.pending_write_domain = 0;
> @@ -945,7 +945,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
>  		int ret = 0;
>  
>  		/* Unbind any ill-fitting objects or pin. */
> -		list_for_each_entry(vma, &eb->vmas, exec_list) {
> +		list_for_each_entry(vma, &eb->vmas, exec_link) {
>  			if (!drm_mm_node_allocated(&vma->node))
>  				continue;
>  
> @@ -958,7 +958,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
>  		}
>  
>  		/* Bind fresh objects */
> -		list_for_each_entry(vma, &eb->vmas, exec_list) {
> +		list_for_each_entry(vma, &eb->vmas, exec_link) {
>  			if (drm_mm_node_allocated(&vma->node))
>  				continue;
>  
> @@ -972,7 +972,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
>  			return ret;
>  
>  		/* Decrement pin count for bound objects */
> -		list_for_each_entry(vma, &eb->vmas, exec_list)
> +		list_for_each_entry(vma, &eb->vmas, exec_link)
>  			eb_unreserve_vma(vma);
>  
>  		ret = i915_gem_evict_vm(eb->vm, true);
> @@ -1061,7 +1061,7 @@ eb_relocate_slow(struct i915_execbuffer *eb)
>  	if (ret)
>  		goto err;
>  
> -	list_for_each_entry(vma, &eb->vmas, exec_list) {
> +	list_for_each_entry(vma, &eb->vmas, exec_link) {
>  		int idx = vma->exec_entry - eb->exec;
>  
>  		ret = eb_relocate_vma_slow(vma, eb, reloc + reloc_offset[idx]);
> @@ -1087,7 +1087,7 @@ eb_move_to_gpu(struct i915_execbuffer *eb)
>  	struct i915_vma *vma;
>  	int ret;
>  
> -	list_for_each_entry(vma, &eb->vmas, exec_list) {
> +	list_for_each_entry(vma, &eb->vmas, exec_link) {
>  		struct drm_i915_gem_object *obj = vma->obj;
>  
>  		if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
> @@ -1309,7 +1309,7 @@ eb_move_to_active(struct i915_execbuffer *eb)
>  {
>  	struct i915_vma *vma;
>  
> -	list_for_each_entry(vma, &eb->vmas, exec_list) {
> +	list_for_each_entry(vma, &eb->vmas, exec_link) {
>  		struct drm_i915_gem_object *obj = vma->obj;
>  
>  		obj->base.write_domain = obj->base.pending_write_domain;
> @@ -1383,7 +1383,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
>  		memset(&eb->shadow_exec_entry, 0, sizeof(*vma->exec_entry));
>  	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
>  	i915_gem_object_get(shadow_batch_obj);
> -	list_add_tail(&vma->exec_list, &eb->vmas);
> +	list_add_tail(&vma->exec_link, &eb->vmas);
>  
>  out:
>  	i915_gem_object_unpin_pages(shadow_batch_obj);
> diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
> index 2e03f81dddbe..4d827300d1a8 100644
> --- a/drivers/gpu/drm/i915/i915_vma.h
> +++ b/drivers/gpu/drm/i915/i915_vma.h
> @@ -100,8 +100,11 @@ struct i915_vma {
>  	struct list_head obj_link; /* Link in the object's VMA list */
>  	struct rb_node obj_node;
>  
> -	/** This vma's place in the batchbuffer or on the eviction list */
> -	struct list_head exec_list;
> +	/** This vma's place in the execbuf reservation list */
> +	struct list_head exec_link;
> +
> +	/** This vma's place in the eviction list */
> +	struct list_head evict_link;
>  
>  	/**
>  	 * Used for performing relocations during execbuffer insertion.
> -- 
> 2.11.0
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx