[Intel-gfx] [PATCH 2/4] drm/i915: Improve dynamic management/eviction of lrc backing objects

Daniel Vetter daniel at ffwll.ch
Wed Oct 7 09:05:46 PDT 2015


On Tue, Oct 06, 2015 at 03:52:02PM +0100, Nick Hoath wrote:
> Shovel all context related objects through the active queue and obj
> management.
> 
> - Added callback in vma_(un)bind to add CPU (un)mapping at same time
>   if desired
> - Inserted LRC hw context & ringbuf to vma active list
> 
> Issue: VIZ-4277
> Signed-off-by: Nick Hoath <nicholas.hoath at intel.com>
> ---
>  drivers/gpu/drm/i915/i915_drv.h         |  4 ++
>  drivers/gpu/drm/i915/i915_gem.c         |  3 ++
>  drivers/gpu/drm/i915/i915_gem_gtt.c     |  8 ++++
>  drivers/gpu/drm/i915/intel_lrc.c        | 28 +++++++++++--
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 71 ++++++++++++++++++---------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  3 --
>  6 files changed, 79 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 3d217f9..d660ee3 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2169,6 +2169,10 @@ struct drm_i915_gem_object {
>  			struct work_struct *work;
>  		} userptr;
>  	};
> +
> +	/** Support for automatic CPU side mapping of object */
> +	int (*mmap)(struct drm_i915_gem_object *obj, bool unmap);

I don't think we need a map hook; mapping can still be done (if it isn't
already) by the callers. Also it's better to rename this to vma_unbind
(and it should be at the vma level, I think) since there are other
potential users. So: explicit mapping, lazy unmapping for the kmaps we
need. That's the same design we're using for binding objects into gpu
address spaces.
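
Very roughly, the direction I mean looks something like the sketch below
(hypothetical, for illustration only -- the unbind_vma hook and the
private backpointer are made-up names, not existing i915 API): the vma
carries an optional callback that tears the CPU mapping down lazily when
the vma is actually unbound, while creating the mapping stays an explicit
step done by whoever pins the object.

	/* Hypothetical sketch: unbind_vma and vma->private are placeholder
	 * names, not the current i915 interfaces.
	 */
	struct i915_vma {
		/* ...existing fields (node, obj, vm, bound, pin_count, ...)... */

		/* Optional hook, called just before the vma is unbound. */
		void (*unbind_vma)(struct i915_vma *vma);
		/* Opaque backpointer for the hook's owner (here: the ringbuffer). */
		void *private;
	};

	/* Lazy teardown of the CPU mapping, owned by the ringbuffer code. */
	static void intel_ringbuffer_unbind_vma(struct i915_vma *vma)
	{
		struct intel_ringbuffer *ringbuf = vma->private;

		if (ringbuf->virtual_start) {
			iounmap(ringbuf->virtual_start);
			ringbuf->virtual_start = NULL;
		}
	}

	static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
	{
		if (vma->pin_count)
			return -EBUSY;

		/* Let the owner drop any CPU mapping before the vma goes away. */
		if (vma->unbind_vma)
			vma->unbind_vma(vma);

		/* ...rest of the existing unbind path unchanged... */
		return 0;
	}

Mapping would then stay explicit: intel_lr_context_do_pin() would pin and
ioremap as it does today, and only the iounmap moves into the hook.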

Also, Chris Wilson has something similar; please align with him on the
precise design of this callback.

Thanks, Daniel

> +	void *mappable;
>  };
>  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index fc82171..56e0e00 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -3262,6 +3262,9 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>  	if (vma->pin_count)
>  		return -EBUSY;
>  
> +	if (obj->mmap)
> +		obj->mmap(obj, true);
> +
>  	BUG_ON(obj->pages == NULL);
>  
>  	if (wait) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 620d57e..786ec4b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -3495,6 +3495,14 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
>  
>  	vma->bound |= bind_flags;
>  
> +	if (vma->obj->mmap) {
> +		ret = vma->obj->mmap(vma->obj, false);
> +		if (ret) {
> +			i915_vma_unbind(vma);
> +			return ret;
> +		}
> +	}
> +
>  	return 0;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index e8f5b6c..b807928 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -723,6 +723,18 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  
>  	intel_logical_ring_advance(request->ringbuf);
>  
> +	/* Push the hw context on to the active list */
> +	i915_vma_move_to_active(
> +			i915_gem_obj_to_ggtt(
> +				request->ctx->engine[ring->id].state),
> +			request);
> +
> +	/* Push the ringbuf on to the active list */
> +	i915_vma_move_to_active(
> +			i915_gem_obj_to_ggtt(
> +			request->ctx->engine[ring->id].ringbuf->obj),
> +			request);
> +
>  	request->tail = request->ringbuf->tail;
>  
>  	if (intel_ring_stopped(ring))
> @@ -1006,10 +1018,15 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
>  	if (ret)
>  		return ret;
>  
> -	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
> +	ret = i915_gem_obj_ggtt_pin(ringbuf->obj, PAGE_SIZE,
> +			PIN_MAPPABLE);
>  	if (ret)
>  		goto unpin_ctx_obj;
>  
> +	ret = i915_gem_object_set_to_gtt_domain(ringbuf->obj, true);
> +	if (ret)
> +		goto unpin_rb_obj;
> +
>  	ctx_obj->dirty = true;
>  
>  	/* Invalidate GuC TLB. */
> @@ -1018,6 +1035,8 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
>  
>  	return ret;
>  
> +unpin_rb_obj:
> +	i915_gem_object_ggtt_unpin(ringbuf->obj);
>  unpin_ctx_obj:
>  	i915_gem_object_ggtt_unpin(ctx_obj);
>  
> @@ -1052,7 +1071,7 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
>  	if (ctx_obj) {
>  		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
>  		if (--rq->ctx->engine[ring->id].pin_count == 0) {
> -			intel_unpin_ringbuffer_obj(ringbuf);
> +			i915_gem_object_ggtt_unpin(ringbuf->obj);
>  			i915_gem_object_ggtt_unpin(ctx_obj);
>  		}
>  	}
> @@ -2369,7 +2388,7 @@ void intel_lr_context_free(struct intel_context *ctx)
>  			struct intel_engine_cs *ring = ringbuf->ring;
>  
>  			if (ctx == ring->default_context) {
> -				intel_unpin_ringbuffer_obj(ringbuf);
> +				i915_gem_object_ggtt_unpin(ringbuf->obj);
>  				i915_gem_object_ggtt_unpin(ctx_obj);
>  			}
>  			WARN_ON(ctx->engine[ring->id].pin_count);
> @@ -2536,5 +2555,8 @@ void intel_lr_context_reset(struct drm_device *dev,
>  
>  		ringbuf->head = 0;
>  		ringbuf->tail = 0;
> +
> +		i915_gem_object_ggtt_unpin(
> +				ctx->engine[ring->id].state);
>  	}
>  }
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index c82c74c..79df8ca 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1958,38 +1958,35 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
>  	return 0;
>  }
>  
> -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> +static int intel_mmap_ringbuffer_obj(struct drm_i915_gem_object *obj,
> +		bool unmap)
>  {
> -	iounmap(ringbuf->virtual_start);
> -	ringbuf->virtual_start = NULL;
> -	i915_gem_object_ggtt_unpin(ringbuf->obj);
> -}
> -
> -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
> -				     struct intel_ringbuffer *ringbuf)
> -{
> -	struct drm_i915_private *dev_priv = to_i915(dev);
> -	struct drm_i915_gem_object *obj = ringbuf->obj;
> -	int ret;
> -
> -	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
> -	if (ret)
> -		return ret;
> -
> -	ret = i915_gem_object_set_to_gtt_domain(obj, true);
> -	if (ret) {
> -		i915_gem_object_ggtt_unpin(obj);
> -		return ret;
> -	}
> -
> -	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
> -			i915_gem_obj_ggtt_offset(obj), ringbuf->size);
> -	if (ringbuf->virtual_start == NULL) {
> -		i915_gem_object_ggtt_unpin(obj);
> -		return -EINVAL;
> +	int ret = 0;
> +	struct intel_ringbuffer *ringbuf =
> +	(struct intel_ringbuffer *)obj->mappable;
> +
> +	if (!unmap) {
> +		struct drm_device *dev = ringbuf->ring->dev;
> +		struct drm_i915_private *dev_priv = to_i915(dev);
> +
> +		WARN_ON(ringbuf->virtual_start != NULL);
> +		if (ringbuf->virtual_start == NULL) {
> +			ringbuf->virtual_start = ioremap_wc(
> +					dev_priv->gtt.mappable_base +
> +					i915_gem_obj_ggtt_offset(obj),
> +					ringbuf->size);
> +			if (ringbuf->virtual_start == NULL) {
> +				i915_gem_object_ggtt_unpin(obj);
> +				return -EINVAL;
> +			}
> +		}
> +	} else {
> +		if (!i915_gem_obj_is_pinned(ringbuf->obj)) {
> +			iounmap(ringbuf->virtual_start);
> +			ringbuf->virtual_start = NULL;
> +		}
>  	}
> -
> -	return 0;
> +	return ret;
>  }
>  
>  static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> @@ -2016,6 +2013,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
>  
>  	ringbuf->obj = obj;
>  
> +	obj->mmap = intel_mmap_ringbuffer_obj;
> +	obj->mappable = ringbuf;
> +
>  	return 0;
>  }
>  
> @@ -2094,7 +2094,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
>  			goto error;
>  	}
>  
> -	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
> +	ret = i915_gem_obj_ggtt_pin(ringbuf->obj, PAGE_SIZE, PIN_MAPPABLE);
>  	if (ret) {
>  		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
>  				ring->name, ret);
> @@ -2102,12 +2102,19 @@ static int intel_init_ring_buffer(struct drm_device *dev,
>  		goto error;
>  	}
>  
> +	ret = i915_gem_object_set_to_gtt_domain(ringbuf->obj, true);
> +	if (ret)
> +		goto error_unpin;
> +
>  	ret = i915_cmd_parser_init_ring(ring);
>  	if (ret)
>  		goto error;
>  
>  	return 0;
>  
> +error_unpin:
> +	i915_gem_object_ggtt_unpin(ringbuf->obj);
> +	intel_destroy_ringbuffer_obj(ringbuf);
>  error:
>  	intel_ringbuffer_free(ringbuf);
>  	ring->buffer = NULL;
> @@ -2126,7 +2133,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
>  	intel_stop_ring_buffer(ring);
>  	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
>  
> -	intel_unpin_ringbuffer_obj(ring->buffer);
> +	i915_gem_object_ggtt_unpin(ring->buffer->obj);
>  	intel_ringbuffer_free(ring->buffer);
>  	ring->buffer = NULL;
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index d99b167..8daaf99 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -421,9 +421,6 @@ intel_write_status_page(struct intel_engine_cs *ring,
>  
>  struct intel_ringbuffer *
>  intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
> -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
> -				     struct intel_ringbuffer *ringbuf);
> -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
>  void intel_ringbuffer_free(struct intel_ringbuffer *ring);
>  
>  void intel_stop_ring_buffer(struct intel_engine_cs *ring);
> -- 
> 1.9.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch