[Intel-gfx] [PATCH 07/16] drm/i915: don't call shmem_read_mapping unnecessarily

Daniel Vetter daniel at ffwll.ch
Mon Mar 26 11:10:05 CEST 2012


On Sun, Mar 25, 2012 at 07:47:34PM +0200, Daniel Vetter wrote:
> This speeds up pwrite and pread from ~120 µs to ~100 µs for
> reading/writing 1mb on my snb (if the backing storage pages
> are already pinned, of course).
> 
> v2: Chris Wilson pointed out a claring page reference bug - I've
s/claring/glaring, noticed by Chris.

Note to self: Fix this when applying.
-Daniel

> unconditionally dropped the reference. With that fixed (and the
> associated reduction of dirt in dmesg) it's now even a notch faster.
> 
> v3: Unconditionally grab a page reference when dropping
> dev->struct_mutex to simplify the code-flow.
> 
> Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
> ---
>  drivers/gpu/drm/i915/i915_gem.c |   42 +++++++++++++++++++++++++++-----------
>  1 files changed, 30 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index c5b250c..117fda4 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -321,6 +321,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
>  	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
>  	int hit_slowpath = 0;
>  	int needs_clflush = 0;
> +	int release_page;
>  
>  	user_data = (char __user *) (uintptr_t) args->data_ptr;
>  	remain = args->size;
> @@ -355,10 +356,16 @@ i915_gem_shmem_pread(struct drm_device *dev,
>  		if ((shmem_page_offset + page_length) > PAGE_SIZE)
>  			page_length = PAGE_SIZE - shmem_page_offset;
>  
> -		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
> -		if (IS_ERR(page)) {
> -			ret = PTR_ERR(page);
> -			goto out;
> +		if (obj->pages) {
> +			page = obj->pages[offset >> PAGE_SHIFT];
> +			release_page = 0;
> +		} else {
> +			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
> +			if (IS_ERR(page)) {
> +				ret = PTR_ERR(page);
> +				goto out;
> +			}
> +			release_page = 1;
>  		}
>  
>  		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
> @@ -378,7 +385,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
>  		}
>  
>  		hit_slowpath = 1;
> -
> +		page_cache_get(page);
>  		mutex_unlock(&dev->struct_mutex);
>  
>  		vaddr = kmap(page);
> @@ -397,9 +404,11 @@ i915_gem_shmem_pread(struct drm_device *dev,
>  		kunmap(page);
>  
>  		mutex_lock(&dev->struct_mutex);
> +		page_cache_release(page);
>  next_page:
>  		mark_page_accessed(page);
> -		page_cache_release(page);
> +		if (release_page)
> +			page_cache_release(page);
>  
>  		if (ret) {
>  			ret = -EFAULT;
> @@ -680,6 +689,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
>  	int shmem_page_offset, page_length, ret = 0;
>  	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
>  	int hit_slowpath = 0;
> +	int release_page;
>  
>  	user_data = (char __user *) (uintptr_t) args->data_ptr;
>  	remain = args->size;
> @@ -704,10 +714,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
>  		if ((shmem_page_offset + page_length) > PAGE_SIZE)
>  			page_length = PAGE_SIZE - shmem_page_offset;
>  
> -		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
> -		if (IS_ERR(page)) {
> -			ret = PTR_ERR(page);
> -			goto out;
> +		if (obj->pages) {
> +			page = obj->pages[offset >> PAGE_SHIFT];
> +			release_page = 0;
> +		} else {
> +			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
> +			if (IS_ERR(page)) {
> +				ret = PTR_ERR(page);
> +				goto out;
> +			}
> +			release_page = 1;
>  		}
>  
>  		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
> @@ -725,7 +741,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
>  		}
>  
>  		hit_slowpath = 1;
> -
> +		page_cache_get(page);
>  		mutex_unlock(&dev->struct_mutex);
>  
>  		vaddr = kmap(page);
> @@ -740,10 +756,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
>  		kunmap(page);
>  
>  		mutex_lock(&dev->struct_mutex);
> +		page_cache_release(page);
>  next_page:
>  		set_page_dirty(page);
>  		mark_page_accessed(page);
> -		page_cache_release(page);
> +		if (release_page)
> +			page_cache_release(page);
>  
>  		if (ret) {
>  			ret = -EFAULT;
> -- 
> 1.7.7.6
> 

-- 
Daniel Vetter
Mail: daniel at ffwll.ch
Mobile: +41 (0)79 365 57 48



More information about the Intel-gfx mailing list