[Intel-gfx] [PATCH 20/33] drm/i915: Use VMA for ringbuffer tracking

Joonas Lahtinen joonas.lahtinen at linux.intel.com
Thu Aug 11 09:32:50 UTC 2016


On su, 2016-08-07 at 15:45 +0100, Chris Wilson wrote:
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 03a4d2ae71db..761201ff6b34 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -343,7 +343,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
>  	for_each_engine(engine, dev_priv) {
>  		struct intel_context *ce = &ctx->engine[engine->id];
>  		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
> -		struct drm_i915_gem_object *obj;
> +		struct i915_vma *vma;
>  
>  		/* TODO: We have a design issue to be solved here. Only when we
>  		 * receive the first batch, we know which engine is used by the
> @@ -358,17 +358,15 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
>  		lrc->context_desc = lower_32_bits(ce->lrc_desc);
>  
>  		/* The state page is after PPHWSP */
> -		gfx_addr = ce->state->node.start;
> -		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
> +		vma = ce->state;
> +		lrc->ring_lcra = vma->node.start + LRC_STATE_PN * PAGE_SIZE;

An alias just for this line? Maybe not.

>  		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
>  				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
>  
> -		obj = ce->ring->obj;
> -		gfx_addr = i915_gem_obj_ggtt_offset(obj);
> -
> -		lrc->ring_begin = gfx_addr;
> -		lrc->ring_end = gfx_addr + obj->base.size - 1;
> -		lrc->ring_next_free_location = gfx_addr;
> +		vma = ce->ring->vma;
> +		lrc->ring_begin = vma->node.start;
> +		lrc->ring_end = vma->node.start + vma->node.size - 1;
> +		lrc->ring_next_free_location = lrc->ring_begin;

Again, an alias for three lines? And it's a multipurpose alias too, so
double nope.

> @@ -1744,16 +1744,17 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
>  static int
>  lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
>  {
> +#define HWS_OFFSET (LRC_PPHWSP_PN * PAGE_SIZE)

Wouldn't this go next to LRC_PPHWSP_PN?

> @@ -1853,79 +1852,78 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
>  
>  static void cleanup_status_page(struct intel_engine_cs *engine)
>  {
> -	struct drm_i915_gem_object *obj;
> +	struct i915_vma *vma;
>  
> -	obj = engine->status_page.obj;
> -	if (obj == NULL)
> +	vma = nullify(&engine->status_page.vma);
> +	if (!vma)
>  		return;
>  
> -	kunmap(sg_page(obj->pages->sgl));
> -	i915_gem_object_ggtt_unpin(obj);
> -	i915_gem_object_put(obj);
> -	engine->status_page.obj = NULL;
> +	i915_vma_unpin(vma);
> +	i915_gem_object_unpin_map(vma->obj);
> +	i915_gem_object_put(vma->obj);

This looks a tad strange, because usually one first does all 'foo->bar'
releases and then 'foo'. Just commenting here.

<SNIP>

> -	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
> -	engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
> -	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
> +	flags = PIN_GLOBAL;
> +	if (!HAS_LLC(engine->i915))
> +		/* On g33, we cannot place HWS above 256MiB, so
> +		 * restrict its pinning to the low mappable arena.
> +		 * Though this restriction is not documented for
> +		 * gen4, gen5, or byt, they also behave similarly
> +		 * and hang if the HWS is placed at the top of the
> +		 * GTT. To generalise, it appears that all !llc
> +		 * platforms have issues with us placing the HWS
> +		 * above the mappable region (even though we never
> +		 * actually map it).
> +		 */
> +		flags |= PIN_MAPPABLE;

For readability, I'd move the comment one level up and before the if.
 
> +	DRM_DEBUG_DRIVER("%s hws offset: 0x%08llx\n",
> +			 engine->name, vma->node.start);
>  	return 0;
> +
> +err_unref:

Sole error label, could be err.

>  
>  int intel_ring_pin(struct intel_ring *ring)
>  {
> -	struct drm_i915_private *dev_priv = ring->engine->i915;
> -	struct drm_i915_gem_object *obj = ring->obj;
>  	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
> -	unsigned flags = PIN_OFFSET_BIAS | 4096;
> +	unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
>  	void *addr;
>  	int ret;
>  
> -	if (HAS_LLC(dev_priv) && !obj->stolen) {
> -		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, flags);
> -		if (ret)
> -			return ret;
> -
> -		ret = i915_gem_object_set_to_cpu_domain(obj, true);
> -		if (ret)
> -			goto err_unpin;
> -
> -		addr = i915_gem_object_pin_map(obj);
> -		if (IS_ERR(addr)) {
> -			ret = PTR_ERR(addr);
> -			goto err_unpin;
> -		}
> -	} else {
> -		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
> -					       flags | PIN_MAPPABLE);
> -		if (ret)
> -			return ret;
> +	GEM_BUG_ON(ring->vaddr);
>  
> -		ret = i915_gem_object_set_to_gtt_domain(obj, true);
> -		if (ret)
> -			goto err_unpin;
> +	if (ring->vmap)
> +		flags |= PIN_MAPPABLE;
>  
> -		/* Access through the GTT requires the device to be awake. */
> -		assert_rpm_wakelock_held(dev_priv);

This wakelock disappears in this patch.

> +	ret = i915_vma_pin(ring->vma, 0, PAGE_SIZE, flags);
> +	if (unlikely(ret))
> +		return ret;
>  
> -		addr = (void __force *)
> -			i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
> -		if (IS_ERR(addr)) {
> -			ret = PTR_ERR(addr);
> -			goto err_unpin;
> -		}
> +	if (ring->vmap)
> +		addr = i915_gem_object_pin_map(ring->vma->obj);
> +	else
> +		addr = (void __force *)i915_vma_pin_iomap(ring->vma);

Wakelock needed in this path?

> +	if (IS_ERR(addr)) {
> +		i915_vma_unpin(ring->vma);
> +		return PTR_ERR(addr);

Keep the good ol' teardown path.


>  	}
>  
>  	ring->vaddr = addr;
> -	ring->vma = i915_gem_obj_to_ggtt(obj);
>  	return 0;
> -
> -err_unpin:
> -	i915_gem_object_ggtt_unpin(obj);
> -	return ret;
>  }
>  
> -static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> -				      struct intel_ring *ring)
> +static struct i915_vma *
> +intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
>  {
>  	struct drm_i915_gem_object *obj;
> +	struct i915_vma *vma;
> +	int ret;
>  
> -	obj = NULL;
> -	if (!HAS_LLC(dev))
> -		obj = i915_gem_object_create_stolen(dev, ring->size);
> -	if (obj == NULL)
> -		obj = i915_gem_object_create(dev, ring->size);
> +	obj = ERR_PTR(-ENODEV);
> +	if (!HAS_LLC(dev_priv))
> +		obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
>  	if (IS_ERR(obj))
> -		return PTR_ERR(obj);
> +		obj = i915_gem_object_create(&dev_priv->drm, size);
> +	if (IS_ERR(obj))
> +		return ERR_CAST(obj);
>  
>  	/* mark ring buffers as read-only from GPU side by default */
>  	obj->gt_ro = 1;
>  
> -	ring->obj = obj;
> +	if (HAS_LLC(dev_priv) && !obj->stolen)
> +		ret = i915_gem_object_set_to_cpu_domain(obj, true);
> +	else
> +		ret = i915_gem_object_set_to_gtt_domain(obj, true);
> +	if (ret) {
> +		vma = ERR_PTR(ret);
> +		goto err;
> +	}

Might be worth mentioning that the ring objects are now moved to their
domain at the time of creation, not at pin time. Any specific reason for
the change?

Also mention that you're silencing quite a few debugs and one
DRM_ERROR.

> @@ -2060,22 +2040,23 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
>  	ring->last_retired_head = -1;
>  	intel_ring_update_space(ring);
>  
> -	ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
> -	if (ret) {
> -		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
> -				 engine->name, ret);
> -		list_del(&ring->link);
> +	vma = intel_ring_create_vma(engine->i915, size);
> +	if (IS_ERR(vma)) {
>  		kfree(ring);
> -		return ERR_PTR(ret);
> +		return ERR_CAST(vma);
>  	}
> +	ring->vma = vma;
> +	if (HAS_LLC(engine->i915) && !vma->obj->stolen)
> +		ring->vmap = true;

use_vmap/need_vmap or something? 'vmap' sounds like the actual mapping.

>  		ret = init_status_page(engine);
> @@ -2184,11 +2164,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
>  
>  	ret = intel_ring_pin(ring);
>  	if (ret) {
> -		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
> -				engine->name, ret);
> -		intel_destroy_ringbuffer_obj(ring);
> +		intel_ring_free(ring);

Shouldn't this be like goto err_ring?

>  		goto error;
>  	}
> +	engine->buffer = ring;
>  
>  	return 0;
>   
>  	struct intel_engine_cs *engine;
>  	struct list_head link;
> @@ -97,6 +96,7 @@ struct intel_ring {
>  	int space;
>  	int size;
>  	int effective_size;
> +	bool vmap;

Renaming suggested above.

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation


More information about the Intel-gfx mailing list