[Intel-gfx] [PATCH 3/4] drm/i915: Add support for stealing purgeable stolen pages

Goel, Akash akash.goel at intel.com
Fri Jul 31 07:24:28 PDT 2015



On 7/27/2015 3:08 PM, Daniel Vetter wrote:
> On Wed, Jul 22, 2015 at 07:21:48PM +0530, ankitprasad.r.sharma at intel.com wrote:
>> From: Chris Wilson <chris at chris-wilson.co.uk>
>>
>> If we run out of stolen memory when trying to allocate an object, see if
>> we can reap enough purgeable objects to free up enough contiguous free
>> space for the allocation. This is in principle very much like evicting
>> objects to free up enough contiguous space in the vma when binding
>> a new object - and you will be forgiven for thinking that the code looks
>> very similar.
>>
>> At the moment, we do not allow userspace to allocate objects in stolen,
>> so there is neither the memory pressure to trigger stolen eviction nor
>> any purgeable objects inside the stolen arena. However, this will change
>> in the near future, and so better management and defragmentation of
>> stolen memory will become a real issue.
>>
>> v2: Remember to remove the drm_mm_node.
>>
>> v3: Rebased to the latest drm-intel-nightly (Ankit)
>>
>> v4: corrected if-else braces format (Tvrtko/kerneldoc)
>>
>> Testcase: igt/gem_stolen
>>
>> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
>> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>> ---
>>   drivers/gpu/drm/i915/i915_gem_stolen.c | 122 ++++++++++++++++++++++++++++++---
>>   1 file changed, 111 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> index 348ed5a..eaf0bdd 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> @@ -430,18 +430,29 @@ cleanup:
>>   	return NULL;
>>   }
>>
>> -struct drm_i915_gem_object *
>> -i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
>> +static bool mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
>> +{
>> +	if (obj->stolen == NULL)
>> +		return false;
>> +
>> +	if (obj->madv != I915_MADV_DONTNEED)
>> +		return false;
>> +
>> +	if (i915_gem_obj_is_pinned(obj))
>> +		return false;
>> +
>> +	list_add(&obj->obj_exec_link, unwind);
>> +	return drm_mm_scan_add_block(obj->stolen);
>> +}
>> +
>> +static struct drm_mm_node *
>> +stolen_alloc(struct drm_i915_private *dev_priv, u32 size)
>>   {
>> -	struct drm_i915_private *dev_priv = dev->dev_private;
>> -	struct drm_i915_gem_object *obj;
>>   	struct drm_mm_node *stolen;
>> +	struct drm_i915_gem_object *obj;
>> +	struct list_head unwind, evict;
>>   	int ret;
>>
>> -	if (!drm_mm_initialized(&dev_priv->mm.stolen))
>> -		return NULL;
>> -
>> -	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
>>   	if (size == 0)
>>   		return NULL;
>>
>> @@ -451,11 +462,100 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
>>
>>   	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
>>   				 4096, DRM_MM_SEARCH_DEFAULT);
>> -	if (ret) {
>> -		kfree(stolen);
>> -		return NULL;
>> +	if (ret == 0)
>> +		return stolen;
>> +
>> +	/* No more stolen memory available, or too fragmented.
>> +	 * Try evicting purgeable objects and search again.
>> +	 */
>> +
>> +	drm_mm_init_scan(&dev_priv->mm.stolen, size, 4096, 0);
>> +	INIT_LIST_HEAD(&unwind);
>> +
>> +	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
>> +		if (mark_free(obj, &unwind))
>> +			goto found;
>> +
>> +	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
>> +		if (mark_free(obj, &unwind))
>> +			goto found;
>
> Chris and I just discussed on irc that the bound_list isn't in a great LRU
> order right now and Chris sent out a fix for that. But it only works if we
> preferentially shrink inactive objects first. Worth the bother or just a
> FIXME?
Sorry for the late response. Do you mean that, within the bound list, the
inactive stolen objects should be considered for purging first?
Is it really likely that an active bo would also be marked as purgeable?
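If that is the case, would something along these lines be acceptable? Just a
rough sketch on top of this patch, assuming obj->active is the right way to
tell the inactive objects apart here:

	/* Prefer unbound objects, then inactive bound objects, and only
	 * fall back to active ones as a last resort.
	 */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (mark_free(obj, &unwind))
			goto found;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (!obj->active && mark_free(obj, &unwind))
			goto found;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->active && mark_free(obj, &unwind))
			goto found;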

> For the fb use-case alone it's not needed since we can't remove the
> fb until it's no longer being displayed (otherwise the backwards-compat
> code kicks in and synchronously kills the display at RMFB time), and that
> pretty much means we can't put the underlying bo into any cache (and mark
> it purgeable) either.
Here, do you mean that a framebuffer bo should not be (or cannot be) marked
as purgeable by userspace while it is still being scanned out?
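If so, would it make sense for mark_free() to also reject objects that are
pinned for display, in addition to the generic pin check? Only a sketch, and
it assumes obj->pin_display is the right indicator for a scanout buffer:

	/* A buffer still being scanned out is pinned for display; never
	 * consider it for eviction, even if userspace has (wrongly)
	 * marked it purgeable.
	 */
	if (obj->pin_display)
		return false;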

Best regards
Akash

> But a FIXME comment here would be good for sure,
> just in case this assumption ever gets broken.
> -Daniel
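For the FIXME, would something like the below (placed above the bound_list
walk) capture the assumption? Just a draft wording:

	/* FIXME: the bound_list is not kept in strict LRU order and we do
	 * not prefer inactive objects here. For the current fb use-case
	 * this is assumed not to matter, as a scanout buffer cannot be
	 * marked purgeable while it is still being displayed (removing
	 * the fb at RMFB time synchronously kills the display). Revisit
	 * if that assumption is ever broken.
	 */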
>
>> +
>> +found:
>> +	INIT_LIST_HEAD(&evict);
>> +	while (!list_empty(&unwind)) {
>> +		obj = list_first_entry(&unwind,
>> +				       struct drm_i915_gem_object,
>> +				       obj_exec_link);
>> +		list_del_init(&obj->obj_exec_link);
>> +
>> +		if (drm_mm_scan_remove_block(obj->stolen)) {
>> +			list_add(&obj->obj_exec_link, &evict);
>> +			drm_gem_object_reference(&obj->base);
>> +		}
>>   	}
>>
>> +	ret = 0;
>> +	while (!list_empty(&evict)) {
>> +		obj = list_first_entry(&evict,
>> +				       struct drm_i915_gem_object,
>> +				       obj_exec_link);
>> +		list_del_init(&obj->obj_exec_link);
>> +
>> +		if (ret == 0) {
>> +			struct i915_vma *vma, *vma_next;
>> +
>> +			list_for_each_entry_safe(vma, vma_next,
>> +						 &obj->vma_list,
>> +						 vma_link)
>> +				if (i915_vma_unbind(vma))
>> +					break;
>> +
>> +			/* Stolen pins its pages to prevent the
>> +			 * normal shrinker from processing stolen
>> +			 * objects.
>> +			 */
>> +			i915_gem_object_unpin_pages(obj);
>> +
>> +			ret = i915_gem_object_put_pages(obj);
>> +			if (ret == 0) {
>> +				i915_gem_object_release_stolen(obj);
>> +				obj->madv = __I915_MADV_PURGED;
>> +			} else {
>> +				i915_gem_object_pin_pages(obj);
>> +			}
>> +		}
>> +
>> +		drm_gem_object_unreference(&obj->base);
>> +	}
>> +
>> +	if (ret == 0)
>> +		ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
>> +					 4096, DRM_MM_SEARCH_DEFAULT);
>> +	if (ret == 0)
>> +		return stolen;
>> +
>> +	kfree(stolen);
>> +	return NULL;
>> +}
>> +
>> +struct drm_i915_gem_object *
>> +i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
>> +{
>> +	struct drm_i915_private *dev_priv = dev->dev_private;
>> +	struct drm_i915_gem_object *obj;
>> +	struct drm_mm_node *stolen;
>> +
>> +	lockdep_assert_held(&dev->struct_mutex);
>> +
>> +	if (!drm_mm_initialized(&dev_priv->mm.stolen))
>> +		return NULL;
>> +
>> +	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
>> +
>> +	stolen = stolen_alloc(dev_priv, size);
>> +	if (stolen == NULL)
>> +		return NULL;
>> +
>>   	obj = _i915_gem_object_create_stolen(dev, stolen);
>>   	if (obj)
>>   		return obj;
>> --
>> 1.9.1
>>
>> _______________________________________________
>> Intel-gfx mailing list
>> Intel-gfx at lists.freedesktop.org
>> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
>

