[Intel-gfx] [PATCH 2/4] drm/i915: Include bound and active pages in the count of shrinkable objects

Barbalho, Rafael rafael.barbalho at intel.com
Mon May 19 18:04:59 CEST 2014


> -----Original Message-----
> From: Intel-gfx [mailto:intel-gfx-bounces at lists.freedesktop.org] On Behalf
> Of Chris Wilson
> Sent: Tuesday, March 25, 2014 1:23 PM
> To: intel-gfx at lists.freedesktop.org
> Cc: Hugh Dickins
> Subject: [Intel-gfx] [PATCH 2/4] drm/i915: Include bound and active pages in
> the count of shrinkable objects
> 
> When the machine is under a lot of memory pressure and being stressed by
> multiple GPU threads, we quite often report fewer than shrinker->batch (i.e.
> SHRINK_BATCH) pages to be freed. This causes the shrink_control to skip
> calling into i915.ko to release pages, despite the GPU holding onto most of
> the physical pages in its active lists.
> 
> References: https://bugs.freedesktop.org/show_bug.cgi?id=72742
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Rafael Barbalho <rafael.barbalho at intel.com>
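
For anyone following along, the failure mode is in the core shrinker batching: the shrinker core only hands work to ->scan_objects() in chunks of shrinker->batch (SHRINK_BATCH, 128 by default), so when ->count_objects() reports fewer freeable pages than that, the scan callback is simply not invoked and the remainder is deferred. A rough, simplified sketch of that behaviour (not the actual mm/vmscan.c code; the function name here is made up for illustration):

	/* Simplified illustration only -- the real shrink_slab() also tracks
	 * deferred work and scales the count by scanned/eligible pages. */
	static unsigned long sketch_shrink_slab_node(struct shrinker *shrinker,
						     struct shrink_control *sc)
	{
		unsigned long batch = shrinker->batch ? shrinker->batch : SHRINK_BATCH;
		unsigned long total_scan = shrinker->count_objects(shrinker, sc);
		unsigned long freed = 0;

		/* If count_objects() reports less than one batch, this loop is
		 * never entered and the driver's scan callback is skipped. */
		while (total_scan >= batch) {
			sc->nr_to_scan = batch;
			freed += shrinker->scan_objects(shrinker, sc);
			total_scan -= batch;
		}

		return freed;
	}

Counting bound and active objects (minus pins the shrinker cannot drop) keeps the reported total above that threshold under memory pressure.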
> ---
>  drivers/gpu/drm/i915/i915_dma.c |  8 ++++----
>  drivers/gpu/drm/i915/i915_drv.h |  2 +-
>  drivers/gpu/drm/i915/i915_gem.c | 42 +++++++++++++++++++++++------------------
>  3 files changed, 29 insertions(+), 23 deletions(-)
> 
> 
> diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
> index 4e0a26a83500..5a37c75f4b3d 100644
> --- a/drivers/gpu/drm/i915/i915_dma.c
> +++ b/drivers/gpu/drm/i915/i915_dma.c
> @@ -1739,8 +1739,8 @@ out_power_well:
>  	intel_power_domains_remove(dev_priv);
>  	drm_vblank_cleanup(dev);
>  out_gem_unload:
> -	if (dev_priv->mm.inactive_shrinker.scan_objects)
> -		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
> +	if (dev_priv->mm.shrinker.scan_objects)
> +		unregister_shrinker(&dev_priv->mm.shrinker);
> 
>  	if (dev->pdev->msi_enabled)
>  		pci_disable_msi(dev->pdev);
> @@ -1791,8 +1791,8 @@ int i915_driver_unload(struct drm_device *dev)
> 
>  	i915_teardown_sysfs(dev);
> 
> -	if (dev_priv->mm.inactive_shrinker.scan_objects)
> -		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
> +	if (dev_priv->mm.shrinker.scan_objects)
> +		unregister_shrinker(&dev_priv->mm.shrinker);
> 
>  	io_mapping_free(dev_priv->gtt.mappable);
>  	arch_phys_wc_del(dev_priv->gtt.mtrr);
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index a7ad864f1154..cb4bb171e6cc 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -986,7 +986,7 @@ struct i915_gem_mm {
>  	/** PPGTT used for aliasing the PPGTT with the GTT */
>  	struct i915_hw_ppgtt *aliasing_ppgtt;
> 
> -	struct shrinker inactive_shrinker;
> +	struct shrinker shrinker;
>  	bool shrinker_no_lock_stealing;
> 
>  	/** LRU list of objects with fence regs on them. */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index c23034021753..219fe35f9c45 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -54,9 +54,9 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
>  					 struct drm_i915_fence_reg *fence,
>  					 bool enable);
> 
> -static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
> +static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
>  					     struct shrink_control *sc);
> -static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
> +static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
>  					    struct shrink_control *sc);
>  static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
>  static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
> @@ -4651,10 +4651,10 @@ i915_gem_load(struct drm_device *dev)
> 
>  	dev_priv->mm.interruptible = true;
> 
> -	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
> -	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
> -	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
> -	register_shrinker(&dev_priv->mm.inactive_shrinker);
> +	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
> +	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
> +	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
> +	register_shrinker(&dev_priv->mm.shrinker);
>  }
> 
>  /*
> @@ -4913,13 +4913,23 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
>  #endif
>  }
> 
> +static int num_vma_bound(struct drm_i915_gem_object *obj)
> +{
> +	struct i915_vma *vma;
> +	int count = 0;
> +
> +	list_for_each_entry(vma, &obj->vma_list, vma_link)
> +		if (drm_mm_node_allocated(&vma->node))
> +			count++;
> +
> +	return count;
> +}
> +
>  static unsigned long
> -i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
> +i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
>  {
>  	struct drm_i915_private *dev_priv =
> -		container_of(shrinker,
> -			     struct drm_i915_private,
> -			     mm.inactive_shrinker);
> +		container_of(shrinker, struct drm_i915_private, mm.shrinker);
>  	struct drm_device *dev = dev_priv->dev;
>  	struct drm_i915_gem_object *obj;
>  	bool unlock = true;
> @@ -4941,10 +4951,8 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
>  			count += obj->base.size >> PAGE_SHIFT;
> 
>  	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> -		if (obj->active)
> -			continue;
> -
> -		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
> +		if (!i915_gem_obj_is_pinned(obj) &&
> +		    obj->pages_pin_count == num_vma_bound(obj))
>  			count += obj->base.size >> PAGE_SHIFT;
>  	}
> 
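
The net effect of the hunk above, spelled out with an illustrative helper (the name is made up, it is not part of the patch): a bound object now counts as shrinkable when it is not explicitly pinned and every reference on its pages comes from a VMA binding, since each bound VMA holds a pages_pin_count reference that the shrinker can drop by unbinding.

	static bool sketch_obj_is_shrinkable(struct drm_i915_gem_object *obj)
	{
		/* No explicit pin, and no page pins other than the one taken
		 * by each bound VMA -- i.e. nothing the shrinker could not
		 * undo itself by unbinding the object first. */
		return !i915_gem_obj_is_pinned(obj) &&
		       obj->pages_pin_count == num_vma_bound(obj);
	}

This is what lets active and bound objects contribute to the reported count instead of being skipped outright.
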
> @@ -5017,12 +5025,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
>  }
> 
>  static unsigned long
> -i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
> +i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
>  {
>  	struct drm_i915_private *dev_priv =
> -		container_of(shrinker,
> -			     struct drm_i915_private,
> -			     mm.inactive_shrinker);
> +		container_of(shrinker, struct drm_i915_private, mm.shrinker);
>  	struct drm_device *dev = dev_priv->dev;
>  	unsigned long freed;
>  	bool unlock = true;
> --
> 1.9.1
> 


