[Intel-gfx] [PATCH 6/6] drm/i915: Migrate stolen objects before hibernation

Ankitprasad Sharma ankitprasad.r.sharma at intel.com
Mon Oct 12 22:25:10 PDT 2015


On Thu, 2015-10-08 at 12:02 +0100, Chris Wilson wrote:
> On Thu, Oct 08, 2015 at 11:54:29AM +0530, ankitprasad.r.sharma at intel.com wrote:
> > +	/* stolen objects are already pinned to prevent shrinkage */
> > +	memset(&node, 0, sizeof(node));
> > +	ret = drm_mm_insert_node_in_range_generic(&i915->gtt.base.mm,
> > +						  &node,
> > +						  4096, 0, I915_CACHE_NONE,
> > +						  0, i915->gtt.mappable_end,
> > +						  DRM_MM_SEARCH_DEFAULT,
> > +						  DRM_MM_CREATE_DEFAULT);
> > +	if (ret)
> > +		return ret;
> > +
> > +	i915->gtt.base.insert_entries(&i915->gtt.base, obj->pages,
> > +				      node.start, I915_CACHE_NONE, 0);
> 
> This was written using an insert_page() function you don't have. Either
> grab that as well, or you need to pin the entire object into the GGTT,
> i.e. i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); note that to do so
> will also need to be very careful to handle the pinning of obj->pages
> and the introduction of a new GGTT vma.
> 
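I will pull in the insert_page() patch as well. With it, the single
4096-byte GTT node reserved above is enough: we rebind that one page
to each stolen page in turn instead of mapping the whole object.
Roughly like this (untested sketch; the insert_page() hook and the
i915_gem_object_get_dma_address() helper are assumed from the other
series, so the exact signatures may differ):

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void __iomem *src;
		void *dst;

		/* Point the single reserved GGTT page at stolen page i,
		 * rather than binding the entire object upfront.
		 */
		i915->gtt.base.insert_page(&i915->gtt.base,
					   i915_gem_object_get_dma_address(obj, i),
					   node.start, I915_CACHE_NONE, 0);
		wmb();

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto err_node;
		}

		src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start);
		dst = kmap_atomic(page);
		memcpy_fromio(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		io_mapping_unmap_atomic(src);

		page_cache_release(page);
	}

That replaces the insert_entries() call quoted above, and only a
single page of the mappable aperture is consumed during the copy.
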
> > +
> > +	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
> > +		struct page *page;
> > +		void __iomem *src;
> > +		void *dst;
> > +
> > +		page = shmem_read_mapping_page(mapping, i);
> > +		if (IS_ERR(page)) {
> > +			ret = PTR_ERR(page);
> > +			goto err_node;
> > +		}
> > +
> > +		src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start + PAGE_SIZE * i);
> > +		dst = kmap_atomic(page);
> > +		memcpy_fromio(dst, src, PAGE_SIZE);
> > +		kunmap_atomic(dst);
> > +		io_mapping_unmap_atomic(src);
> > +
> > +		page_cache_release(page);
> > +	}
> > +
> > +	wmb();
> > +	i915->gtt.base.clear_range(&i915->gtt.base,
> > +				   node.start, node.size,
> > +				   true);
> > +	drm_mm_remove_node(&node);
> > +
> > +swap_pages:
> > +	stolen_pages = obj->pages;
> > +	obj->pages = NULL;
> > +
> > +	obj->base.filp = file;
> > +	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
> > +	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
> > +
> > +	/* Recreate any pinned binding with pointers to the new storage */
> > +	if (!list_empty(&obj->vma_list)) {
> > +		ret = i915_gem_object_get_pages_gtt(obj);
> > +		if (ret) {
> > +			obj->pages = stolen_pages;
> > +			goto err_file;
> > +		}
> > +
> > +		ret = i915_gem_gtt_prepare_object(obj);
> > +		if (ret) {
> > +			i915_gem_object_put_pages_gtt(obj);
> > +			obj->pages = stolen_pages;
> > +			goto err_file;
> > +		}
> > +
> > +		ret = i915_gem_object_set_to_gtt_domain(obj, true);
> > +		if (ret) {
> > +			i915_gem_gtt_finish_object(obj);
> > +			i915_gem_object_put_pages_gtt(obj);
> > +			obj->pages = stolen_pages;
> > +			goto err_file;
> > +		}
> > +
> > +		obj->get_page.sg = obj->pages->sgl;
> > +		obj->get_page.last = 0;
> > +
> > +		list_for_each_entry(vma, &obj->vma_list, vma_link) {
> > +			if (!drm_mm_node_allocated(&vma->node))
> > +				continue;
> > +
> > +			WARN_ON(i915_vma_bind(vma,
> > +					      obj->cache_level,
> > +					      PIN_UPDATE));
> > +		}
> > +	} else
> > +		list_del(&obj->global_list);
> > +
> > +	/* drop the stolen pin and backing */
> > +	shmemfs_pages = obj->pages;
> > +	obj->pages = stolen_pages;
> > +
> > +	i915_gem_object_unpin_pages(obj);
> > +	obj->ops->put_pages(obj);
> > +	if (obj->ops->release)
> > +		obj->ops->release(obj);
> > +
> > +	obj->ops = &i915_gem_object_ops;
> > +	obj->pages = shmemfs_pages;
> > +
> > +	return 0;
> > +
> > +err_node:
> > +	wmb();
> > +	i915->gtt.base.clear_range(&i915->gtt.base,
> > +				   node.start, node.size,
> > +				   true);
> > +	drm_mm_remove_node(&node);
> > +err_file:
> > +	fput(file);
> > +	obj->base.filp = NULL;
> > +	return ret;
> > +}
> > +
> > +int
> > +i915_gem_freeze(struct drm_device *dev)
> > +{
> > +	/* Called before i915_gem_suspend() when hibernating */
> > +	struct drm_i915_private *i915 = to_i915(dev);
> > +	struct drm_i915_gem_object *obj, *tmp;
> > +	struct list_head *phase[] = {
> > +		&i915->mm.unbound_list, &i915->mm.bound_list, NULL
> > +	}, **p;
> > +
> > +	/* Across hibernation, the stolen area is not preserved.
> > +	 * Anything inside stolen must be copied back to normal
> > +	 * memory if we wish to preserve it.
> > +	 */
> > +	for (p = phase; *p; p++) {
> 
> Didn't we introduce a list of stolen objects in one of the other
> patches?

Yes, but that list only tracks stolen objects that have been marked
purgeable, so it cannot be used to find every stolen allocation; the
declaration is below, with a sketch of the freeze loop after it.

+       /**
+        * List of stolen objects that have been marked as purgeable and
+        * thus available for reaping if we need more space for a new
+        * allocation. Ordered by time of marking purgeable.
+        */
+       struct list_head stolen_list;
+
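
So i915_gem_freeze() still has to walk the bound and unbound lists and
filter on obj->stolen. In outline, the phase loop quoted above becomes
something like this (sketch only; the helper name stands in for the
migrate function quoted earlier, not the exact hunk):

	int ret;

	for (p = phase; *p; p++) {
		list_for_each_entry_safe(obj, tmp, *p, global_list) {
			if (obj->stolen == NULL)
				continue;

			/* Copy the stolen backing into shmemfs so the
			 * contents end up in the hibernation image.
			 */
			ret = i915_gem_object_migrate_stolen_to_shmemfs(obj);
			if (ret)
				return ret;
		}
	}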

Thanks,
Ankit


