[Intel-gfx] [PATCH 6/6] drm/i915: Migrate stolen objects before hibernation

Chris Wilson chris at chris-wilson.co.uk
Thu Oct 8 04:02:23 PDT 2015


On Thu, Oct 08, 2015 at 11:54:29AM +0530, ankitprasad.r.sharma at intel.com wrote:
> +	/* stolen objects are already pinned to prevent shrinkage */
> +	memset(&node, 0, sizeof(node));
> +	ret = drm_mm_insert_node_in_range_generic(&i915->gtt.base.mm,
> +						  &node,
> +						  4096, 0, I915_CACHE_NONE,
> +						  0, i915->gtt.mappable_end,
> +						  DRM_MM_SEARCH_DEFAULT,
> +						  DRM_MM_CREATE_DEFAULT);
> +	if (ret)
> +		return ret;
> +
> +	i915->gtt.base.insert_entries(&i915->gtt.base, obj->pages,
> +				      node.start, I915_CACHE_NONE, 0);

This was written using an insert_page() function you don't have. Either
grab that as well, or you need to pin the entire object into the GGTT,
i.e. i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); note that to do so
you will also need to be very careful to handle the pinning of
obj->pages and the introduction of a new GGTT vma.

> +
> +	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
> +		struct page *page;
> +		void __iomem *src;
> +		void *dst;
> +
> +		page = shmem_read_mapping_page(mapping, i);
> +		if (IS_ERR(page)) {
> +			ret = PTR_ERR(page);
> +			goto err_node;
> +		}
> +
> +		src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start + PAGE_SIZE * i);
> +		dst = kmap_atomic(page);
> +		memcpy_fromio(dst, src, PAGE_SIZE);
> +		kunmap_atomic(dst);
> +		io_mapping_unmap_atomic(src);
> +
> +		page_cache_release(page);
> +	}
> +
> +	wmb();
> +	i915->gtt.base.clear_range(&i915->gtt.base,
> +				   node.start, node.size,
> +				   true);
> +	drm_mm_remove_node(&node);
> +
> +swap_pages:
> +	stolen_pages = obj->pages;
> +	obj->pages = NULL;
> +
> +	obj->base.filp = file;
> +	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
> +	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
> +
> +	/* Recreate any pinned binding with pointers to the new storage */
> +	if (!list_empty(&obj->vma_list)) {
> +		ret = i915_gem_object_get_pages_gtt(obj);
> +		if (ret) {
> +			obj->pages = stolen_pages;
> +			goto err_file;
> +		}
> +
> +		ret = i915_gem_gtt_prepare_object(obj);
> +		if (ret) {
> +			i915_gem_object_put_pages_gtt(obj);
> +			obj->pages = stolen_pages;
> +			goto err_file;
> +		}
> +
> +		ret = i915_gem_object_set_to_gtt_domain(obj, true);
> +		if (ret) {
> +			i915_gem_gtt_finish_object(obj);
> +			i915_gem_object_put_pages_gtt(obj);
> +			obj->pages = stolen_pages;
> +			goto err_file;
> +		}
> +
> +		obj->get_page.sg = obj->pages->sgl;
> +		obj->get_page.last = 0;
> +
> +		list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +			if (!drm_mm_node_allocated(&vma->node))
> +				continue;
> +
> +			WARN_ON(i915_vma_bind(vma,
> +					      obj->cache_level,
> +					      PIN_UPDATE));
> +		}
> +	} else
> +		list_del(&obj->global_list);
> +
> +	/* drop the stolen pin and backing */
> +	shmemfs_pages = obj->pages;
> +	obj->pages = stolen_pages;
> +
> +	i915_gem_object_unpin_pages(obj);
> +	obj->ops->put_pages(obj);
> +	if (obj->ops->release)
> +		obj->ops->release(obj);
> +
> +	obj->ops = &i915_gem_object_ops;
> +	obj->pages = shmemfs_pages;
> +
> +	return 0;
> +
> +err_node:
> +	wmb();
> +	i915->gtt.base.clear_range(&i915->gtt.base,
> +				   node.start, node.size,
> +				   true);
> +	drm_mm_remove_node(&node);
> +err_file:
> +	fput(file);
> +	obj->base.filp = NULL;
> +	return ret;
> +}
> +
> +int
> +i915_gem_freeze(struct drm_device *dev)
> +{
> +	/* Called before i915_gem_suspend() when hibernating */
> +	struct drm_i915_private *i915 = to_i915(dev);
> +	struct drm_i915_gem_object *obj, *tmp;
> +	struct list_head *phase[] = {
> +		&i915->mm.unbound_list, &i915->mm.bound_list, NULL
> +	}, **p;
> +
> +	/* Across hibernation, the stolen area is not preserved.
> +	 * Anything inside stolen must be copied back to normal
> +	 * memory if we wish to preserve it.
> +	 */
> +	for (p = phase; *p; p++) {

Didn't we introduce a list of stolen objects in one of the other
patches?
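
If so, the freeze path could walk that list directly instead of both
phases. A rough sketch, assuming (hypothetically) that the earlier
patch adds an i915->mm.stolen_list with objects linked through an
obj->stolen_link member:

	/* Build the migration list straight from the stolen-object list;
	 * the list/link names here are assumptions about the earlier patch.
	 */
	list_for_each_entry(obj, &i915->mm.stolen_list, stolen_link) {
		if (obj->internal_volatile)
			continue;

		/* Keep each object alive while we migrate, as below */
		drm_gem_object_reference(&obj->base);
		list_move(&obj->global_list, &migrate);
	}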

> +		struct list_head migrate;
> +		int ret;
> +
> +		INIT_LIST_HEAD(&migrate);
> +		list_for_each_entry_safe(obj, tmp, *p, global_list) {
> +			if (obj->stolen == NULL)
> +				continue;
> +
> +			if (obj->internal_volatile)
> +				continue;
> +
> +			/* In the general case, this object may only be alive
> +			 * due to an active reference, and that may disappear
> +			 * when we unbind any of the objects (and so wait upon
> +			 * the GPU and retire requests). To prevent one of the
> +			 * objects from disappearing beneath us, we need to
> +			 * take a reference to each as we build the migration
> +			 * list.
> +			 *
> +			 * This is similar to the strategy required whilst
> +			 * shrinking or evicting objects (for the same reason).
> +			 */
> +			drm_gem_object_reference(&obj->base);
> +			list_move(&obj->global_list, &migrate);
> +		}
> +
> +		ret = 0;
> +		list_for_each_entry_safe(obj, tmp, &migrate, global_list) {
> +			if (ret == 0)
> +				ret = i915_gem_object_migrate_stolen_to_shmemfs(obj);
> +			drm_gem_object_unreference(&obj->base);
> +		}
> +		list_splice(&migrate, *p);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	return 0;
> +}

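As for the "Called before i915_gem_suspend() when hibernating" comment,
one way this could be wired up, sketched under the assumption that the
hook is invoked from the driver's dev_pm_ops .freeze callback (the
actual call site is up to the series):

	static int i915_pm_freeze(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);
		struct drm_device *drm_dev = pci_get_drvdata(pdev);
		int ret;

		/* Migrate stolen objects out before the normal suspend path,
		 * since stolen memory is not preserved across hibernation.
		 */
		ret = i915_gem_freeze(drm_dev);
		if (ret)
			return ret;

		return i915_pm_suspend(dev);
	}
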
-- 
Chris Wilson, Intel Open Source Technology Centre

