[PATCH 15/20] drm/i915: Support to create write combined type vmaps

Chris Wilson chris at chris-wilson.co.uk
Tue Jul 26 15:40:28 UTC 2016


On Tue, Jul 26, 2016 at 08:47:10PM +0530, akash.goel at intel.com wrote:
> From: Chris Wilson <chris at chris-wilson.co.uk>
> 
> vmap() has a provision for controlling the page protection bits, which we
> can use to control the mapping type, e.g. WB, WC, UC or even WT.
> To allow the caller to choose their mapping type, we add a parameter to
> i915_gem_object_pin_map - but we still only allow one vmap to be cached
> per object. If the object is currently not pinned, then we recreate the
> previous vmap with the new access type, but if it was pinned we report an
> error. This effectively limits the access via i915_gem_object_pin_map to a
> single mapping type for the lifetime of the object. Not usually a problem,
> but something to be aware of when setting up the object's vmap.
> 
> We will want to vary the access type to enable WC mappings of ringbuffer
> and context objects on !llc platforms, as well as other objects where we
> need coherent access to the GPU's pages without going through the GTT.
> 
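So, as posted, usage ends up roughly like the sketch below; the -EBUSY path is
the "report an error" case the message describes, when the object is still
pinned with the other mapping type:

	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_VMAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr); /* -EBUSY if still pinned with a WB map */

	/* ... CPU access through the WC mapping ... */

	i915_gem_object_unpin_map(obj);
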
> v2: Remove the redundant braces around pin count check and fix the marker
>     in documentation (Chris)
> 
> v3:
> - Add a new enum for the vmalloc mapping type & pass that as an argument to
>   i915_gem_object_pin_map. (Tvrtko)
> - Use PAGE_MASK to extract or filter the mapping type info and remove a
>   superfluous BUG_ON. (Tvrtko)
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Signed-off-by: Akash Goel <akash.goel at intel.com>
> ---
>  drivers/gpu/drm/i915/i915_drv.h            |  9 ++++-
>  drivers/gpu/drm/i915/i915_gem.c            | 59 +++++++++++++++++++++++-------
>  drivers/gpu/drm/i915/i915_gem_dmabuf.c     |  2 +-
>  drivers/gpu/drm/i915/i915_guc_submission.c |  2 +-
>  drivers/gpu/drm/i915/intel_lrc.c           |  8 ++--
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
>  6 files changed, 61 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 21ba169..35b32be 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -835,6 +835,11 @@ enum i915_cache_level {
>  	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
>  };
>  
> +enum i915_vmap_type {

enum i915_map_type {

> +	I915_VMAP_CACHED = 0,

MAP_WB = 0,
MAP_WC
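
i.e., in full, with the prototype updated to match (just a sketch of the
suggested rename):

enum i915_map_type {
	MAP_WB = 0,
	MAP_WC,
};

void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);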

> +	I915_VMAP_WC,
> +};
> +
>  struct i915_ctx_hang_stats {
>  	/* This context had batch pending when hang was declared */
>  	unsigned batch_pending;
> @@ -3141,6 +3146,7 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>  /**
>   * i915_gem_object_pin_map - return a contiguous mapping of the entire object
>   * @obj - the object to map into kernel address space
> + * @vmap_type - whether the vmalloc mapping should be using WC or WB pgprot_t
>   *
>   * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
>   * pages and then returns a contiguous mapping of the backing storage into
> @@ -3152,7 +3158,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>   * Returns the pointer through which to access the mapped object, or an
>   * ERR_PTR() on error.
>   */
> -void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
> +void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
> +					enum i915_vmap_type vmap_type);
>  
>  /**
>   * i915_gem_object_unpin_map - releases an earlier mapping
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 40047eb..997bc18 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2115,10 +2115,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>  	list_del(&obj->global_list);
>  
>  	if (obj->mapping) {
> -		if (is_vmalloc_addr(obj->mapping))
> -			vunmap(obj->mapping);
> +		void *ptr = (void *)((uintptr_t)obj->mapping & PAGE_MASK);
> +		if (is_vmalloc_addr(ptr))
> +			vunmap(ptr);
>  		else
> -			kunmap(kmap_to_page(obj->mapping));
> +			kunmap(kmap_to_page(ptr));
>  		obj->mapping = NULL;
>  	}
>  
> @@ -2291,7 +2292,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>  }
>  
>  /* The 'mapping' part of i915_gem_object_pin_map() below */
> -static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
> +static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
> +				bool use_wc)
>  {
>  	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
>  	struct sg_table *sgt = obj->pages;
> @@ -2303,7 +2305,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
>  	void *addr;
>  
>  	/* A single page can always be kmapped */
> -	if (n_pages == 1)
> +	if (n_pages == 1 && !use_wc)
>  		return kmap(sg_page(sgt->sgl));
>  
>  	if (n_pages > ARRAY_SIZE(stack_pages)) {
> @@ -2319,7 +2321,8 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
>  	/* Check that we have the expected number of pages */
>  	GEM_BUG_ON(i != n_pages);
>  
> -	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
> +	addr = vmap(pages, n_pages, VM_NO_GUARD,
> +		    use_wc ? pgprot_writecombine(PAGE_KERNEL_IO) : PAGE_KERNEL);
>  
>  	if (pages != stack_pages)
>  		drm_free_large(pages);
> @@ -2328,11 +2331,18 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
>  }
>  
>  /* get, pin, and map the pages of the object into kernel space */
> -void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
> +void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
> +			      enum i915_vmap_type vmap_type)
>  {
> +	void *ptr;
> +	bool use_wc, has_wc;

Now that you have an enum, these should use it rather than bool. (use_wc is then dead.)
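With the parameter itself becoming enum i915_map_type type, the locals would
then read something like (sketch):

	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int ret;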

> +	bool pinned;
>  	int ret;
>  
>  	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +	GEM_BUG_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0);
> +
> +	use_wc = (vmap_type == I915_VMAP_WC);

>  
>  	ret = i915_gem_object_get_pages(obj);
>  	if (ret)
> @@ -2340,15 +2350,38 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
>  
>  	i915_gem_object_pin_pages(obj);
>  
> -	if (!obj->mapping) {
> -		obj->mapping = i915_gem_object_map(obj);
> -		if (!obj->mapping) {
> -			i915_gem_object_unpin_pages(obj);
> -			return ERR_PTR(-ENOMEM);
> +	pinned = obj->pages_pin_count > 1;
> +	ptr = (void *)((uintptr_t)obj->mapping & PAGE_MASK);
> +	has_wc = (uintptr_t)obj->mapping & ~PAGE_MASK;

has_type = (uintptr_t)obj->mapping & ~PAGE_MASK;
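
(The reason this works at all: vmap() and kmap() both return page-aligned
addresses, so the bits below PAGE_SIZE in obj->mapping are free to carry the
map type. If you want to avoid the open-coded masking, a pair of helpers would
do; the names below are purely illustrative and not part of this patch:)

#define ptr_mask_bits(ptr)						\
	((typeof(ptr))((uintptr_t)(ptr) & PAGE_MASK))

#define ptr_pack_bits(ptr, bits)					\
	((typeof(ptr))((uintptr_t)(ptr) | (bits)))

Then obj->mapping = ptr_pack_bits(ptr, type) on store, and
ptr = ptr_mask_bits(obj->mapping) on lookup.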

> +
> +	if (ptr && has_wc != use_wc) {

if (ptr && has_type != type) {

> +		if (pinned) {
> +			ret = -EBUSY;
> +			goto err;
>  		}
> +
> +		if (is_vmalloc_addr(ptr))
> +			vunmap(ptr);
> +		else
> +			kunmap(kmap_to_page(ptr));
> +		ptr = obj->mapping = NULL;
>  	}
>  
> -	return obj->mapping;
> +	if (!ptr) {
> +		ptr = i915_gem_object_map(obj, use_wc);

ptr = i915_gem_object_map(obj, type);

> +		if (!ptr) {
> +			ret = -ENOMEM;
> +			goto err;
> +		}
> +
> +		obj->mapping = (void *)((uintptr_t)ptr | use_wc);

obj->mapping = (void *)((uintptr_t)ptr | type);
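
With that in place the callers just say which mapping they want, e.g. for the
!llc ringbuffer/context case the commit message mentions, something along
these lines (sketch only; the HAS_LLC() check and locals are illustrative):

	void *addr;

	addr = i915_gem_object_pin_map(obj,
				       HAS_LLC(dev_priv) ? MAP_WB : MAP_WC);
	if (IS_ERR(addr))
		return PTR_ERR(addr);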
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

