[Intel-gfx] [RFC] drm/i915: Support creating a proxy object that links to other object's pages

Chris Wilson chris at chris-wilson.co.uk
Thu Dec 7 15:46:29 UTC 2017


Sometimes it is useful to work with arbitrarily large surfaces, that
exceed the limits of various bits of HW. For instance, you may be
creating a mega-texture, a single very large texture with essentially
infinite dimensions, but need to carve normal textures out of it for sampling
by hardware. As the texture is too big for the HW, one cannot simply use
the GPU to copy chunks out of the huge texture into a smaller version,
instead must find some other indexing trick. The proxy object presents
one such trick. It allows us to create an object with pages of another,
in an arbitrary order, thus we are able to create a window into the
larger object that can fit within the restrictions of HW, all without
copying any data and just manipulating the GPU page tables.

A similar situation often arises with framebuffers. It is convenient to
address the entire frontbuffer as a single continuous surface; but that
may be much too large for the scanout to handle (e.g. 3x4K monitors, let
alone adding in virtual or prime outputs). Instead of breaking up the
contiguous frontbuffer (ala per-crtc-pixmaps), we can carve smaller
per-crtc-framebuffers out of the larger using a proxy surface and
redirecting the page tables to refer back to the larger surface.

To support such page table manipulations, we introduce a scratch proxy
object. It can be created with arbitrary size (give or take the usual
u64/size_t restrictions), and by default every page index links to the
same scratch page (i.e. you can create a 128PiB object backed by only
4KiB). Through the use of DRM_I915_GEM_SET_PAGES ioctl, different
indices within the proxy object can be redirected to different locations
within a target object. The ioctl takes a 2D strided array as its
description, which should facilitate constructing the most common layouts.

Opens:
- userptr needs to revoke proxy attachments
- userspace demonstrator
- explicit selftests to cover the different cases of link_node(), atm we
  rely on the random walk of the smoketest to find all the corner cases

Suggested-by: Kristian Høgsberg <hoegsberg at gmail.com>
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Kristian Høgsberg <hoegsberg at gmail.com>
Cc: Matthew Auld <matthew.william.auld at gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
 drivers/gpu/drm/i915/Makefile                      |   1 +
 drivers/gpu/drm/i915/i915_drv.c                    |   2 +
 drivers/gpu/drm/i915/i915_drv.h                    |   2 +
 drivers/gpu/drm/i915/i915_gem.c                    |  98 +++-
 drivers/gpu/drm/i915/i915_gem_object.h             |  16 +-
 drivers/gpu/drm/i915/i915_gem_scratch.c            | 373 ++++++++++++++
 drivers/gpu/drm/i915/selftests/huge_gem_object.c   |   6 +-
 drivers/gpu/drm/i915/selftests/huge_gem_object.h   |   2 +-
 drivers/gpu/drm/i915/selftests/i915_gem_object.c   |   2 +-
 drivers/gpu/drm/i915/selftests/i915_gem_scratch.c  | 533 +++++++++++++++++++++
 .../gpu/drm/i915/selftests/i915_live_selftests.h   |   1 +
 .../gpu/drm/i915/selftests/i915_mock_selftests.h   |   1 +
 include/uapi/drm/i915_drm.h                        |  44 +-
 13 files changed, 1065 insertions(+), 16 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_gem_scratch.c
 create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_scratch.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index bdf8724ac0e1..baa7794952c1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -69,6 +69,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_object.o \
 	  i915_gem_render_state.o \
 	  i915_gem_request.o \
+	  i915_gem_scratch.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cbe7f2d969cc..abd8d9fd9aca 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -378,6 +378,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		}
 		break;
 
+	case I915_PARAM_CREATE_VERSION:
 	case I915_PARAM_MMAP_VERSION:
 		/* Remember to bump this if the version changes! */
 	case I915_PARAM_HAS_GEM:
@@ -2814,6 +2815,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_PAGES, i915_gem_set_pages_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c4f88a9a4a53..e06d276c82d7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3489,6 +3489,8 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);
+int i915_gem_set_pages_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
 int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d43d7a8acac3..db1885a2ee80 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -595,19 +595,23 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
 static int
 i915_gem_create(struct drm_file *file,
 		struct drm_i915_private *dev_priv,
-		uint64_t size,
-		uint32_t *handle_p)
+		u64 size,
+		unsigned long flags,
+		u32 *handle_p)
 {
 	struct drm_i915_gem_object *obj;
 	int ret;
 	u32 handle;
 
-	size = roundup(size, PAGE_SIZE);
+	size = round_up(size, PAGE_SIZE);
 	if (size == 0)
 		return -EINVAL;
 
 	/* Allocate the new object */
-	obj = i915_gem_object_create(dev_priv, size);
+	if (flags & I915_GEM_CREATE_SCRATCH)
+		obj = i915_gem_object_create_scratch(dev_priv, size);
+	else
+		obj = i915_gem_object_create(dev_priv, size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -629,8 +633,8 @@ i915_gem_dumb_create(struct drm_file *file,
 	/* have to work out size/pitch and return them */
 	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
 	args->size = args->pitch * args->height;
-	return i915_gem_create(file, to_i915(dev),
-			       args->size, &args->handle);
+	return i915_gem_create(file, to_i915(dev), args->size, 0,
+			       &args->handle);
 }
 
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -650,12 +654,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_create *args = data;
+	struct drm_i915_gem_create_v2 *args = data;
+
+	if (args->flags & ~I915_GEM_CREATE_SCRATCH)
+		return -EINVAL;
 
 	i915_gem_flush_free_objects(dev_priv);
 
-	return i915_gem_create(file, dev_priv,
-			       args->size, &args->handle);
+	return i915_gem_create(file, dev_priv, args->size, args->flags,
+			       &args->handle);
 }
 
 static inline enum fb_op_origin
@@ -4454,6 +4461,79 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	return err;
 }
 
+int
+i915_gem_set_pages_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_i915_gem_set_pages *arg = data;
+	struct drm_i915_gem_object *dst, *src;
+	u64 length;
+	int err;
+
+	if (!arg->width || !arg->height)
+		return 0;
+
+	if (arg->dst_handle == arg->src_handle)
+		return -EINVAL;
+
+	if (!arg->dst_stride)
+		arg->dst_stride = arg->width;
+	if (arg->width > arg->dst_stride)
+		return -EINVAL;
+
+	if (!arg->src_stride)
+		arg->src_stride = arg->width;
+	if (arg->width > arg->src_stride)
+		return -EINVAL;
+
+	if (arg->dst_stride > U64_MAX / (arg->height - 1))
+		return -EINVAL;
+
+	dst = i915_gem_object_lookup(file_priv, arg->dst_handle);
+	if (!dst)
+		return -ENOENT;
+
+	length = arg->dst_stride * (arg->height - 1);
+	if (add_overflows(length, arg->width))
+		return -EINVAL;
+
+	if (range_overflows_t(u64,
+			      arg->dst_offset, length + arg->width,
+			      dst->base.size >> PAGE_SHIFT)) {
+		err = -EINVAL;
+		goto out_dst;
+	}
+
+	src = i915_gem_object_lookup(file_priv, arg->src_handle);
+	if (!src) {
+		err = -ENOENT;
+		goto out_dst;
+	}
+
+	length = arg->src_stride * (arg->height - 1);
+	if (add_overflows(length, arg->width)) {
+		err = -EINVAL;
+		goto out_src;
+	}
+
+	if (range_overflows_t(u64,
+			      arg->src_offset, length + arg->width,
+			      src->base.size >> PAGE_SHIFT)) {
+		err = -EINVAL;
+		goto out_src;
+	}
+
+	err = i915_gem_scratch_set_pages(dst, arg->dst_offset, arg->dst_stride,
+					 src, arg->src_offset, arg->src_stride,
+					 arg->width, arg->height);
+
+out_src:
+	i915_gem_object_put(src);
+out_dst:
+	i915_gem_object_put(dst);
+	return err;
+}
+
 static void
 frontbuffer_retire(struct i915_gem_active *active,
 		   struct drm_i915_gem_request *request)
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 5e54f910db39..dcbd5983eb64 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -261,7 +261,12 @@ struct drm_i915_gem_object {
 			struct work_struct *work;
 		} userptr;
 
-		unsigned long scratch;
+		struct i915_gem_scratch {
+			struct rb_root_cached links;
+			struct page *page;
+		} scratch;
+
+		unsigned long datum;
 
 		void *gvt_info;
 	};
@@ -485,5 +490,14 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 					 unsigned int cache_level);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
 
+struct drm_i915_gem_object *
+i915_gem_object_create_scratch(struct drm_i915_private *i915, u64 size);
+
+int i915_gem_scratch_set_pages(struct drm_i915_gem_object *dst,
+			       u64 dst_offset, u64 dst_stride,
+			       struct drm_i915_gem_object *src,
+			       u64 src_offset, u64 src_stride,
+			       u64 width, u64 height);
+
 #endif
 
diff --git a/drivers/gpu/drm/i915/i915_gem_scratch.c b/drivers/gpu/drm/i915/i915_gem_scratch.c
new file mode 100644
index 000000000000..3cc7ff4286b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_scratch.c
@@ -0,0 +1,373 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/interval_tree.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+
+struct i915_gem_scratch_link {
+	struct interval_tree_node it;
+	struct drm_i915_gem_object *target;
+	u64 target_index;
+
+	struct list_head link;
+};
+
+static int i915_gem_scratch_get_pages(struct drm_i915_gem_object *obj)
+{
+	unsigned int max_segment = i915_sg_segment_size();
+	const int num_pages = obj->base.size >> PAGE_SHIFT;
+	const struct rb_node *rb;
+	struct sg_table *st;
+	struct page **pvec;
+	u64 index;
+	int err;
+
+	lockdep_assert_held(&obj->mm.lock);
+
+	pvec = kvmalloc_array(num_pages, sizeof(*pvec), GFP_KERNEL);
+	if (!pvec)
+		return -ENOMEM;
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st) {
+		err = -ENOMEM;
+		goto err_pvec;
+	}
+
+	index = 0;
+	for (rb = rb_first_cached(&obj->scratch.links); rb; rb = rb_next(rb)) {
+		const struct i915_gem_scratch_link *link =
+			rb_entry(rb, typeof(*link), it.rb);
+		u64 src;
+
+		GEM_BUG_ON(index > link->it.start);
+		GEM_BUG_ON(link->it.last >= num_pages);
+
+		for (; index < link->it.start; index++)
+			pvec[index] = obj->scratch.page;
+
+		err = i915_gem_object_pin_pages(link->target);
+		if (err)
+			goto err_st;
+
+		src = link->target_index;
+		for (; index <= link->it.last; index++) {
+			pvec[index] = i915_gem_object_get_page(link->target,
+							       src++);
+			get_page(pvec[index]);
+		}
+
+		i915_gem_object_unpin_pages(link->target);
+	}
+	for (; index < num_pages; index++)
+		pvec[index] = obj->scratch.page;
+
+rebuild_sg:
+	err = __sg_alloc_table_from_pages(st, pvec, num_pages,
+					  0, num_pages << PAGE_SHIFT,
+					  max_segment, GFP_KERNEL);
+	if (err)
+		goto err_st;
+
+	err = i915_gem_gtt_prepare_pages(obj, st);
+	if (err) {
+		if (max_segment > PAGE_SIZE) {
+			max_segment = PAGE_SIZE;
+			goto rebuild_sg;
+		}
+		goto err_sg;
+	}
+
+	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(st->sgl));
+
+	kvfree(pvec);
+	return 0;
+
+err_sg:
+	sg_free_table(st);
+err_st:
+	while (index--) {
+		if (pvec[index] != obj->scratch.page)
+			put_page(pvec[index]);
+	}
+	kfree(st);
+err_pvec:
+	kvfree(pvec);
+	return err;
+}
+
+static void
+i915_gem_scratch_put_pages(struct drm_i915_gem_object *obj,
+			   struct sg_table *pages)
+{
+	struct sgt_iter sgt_iter;
+	struct page *page;
+
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		obj->mm.dirty = false;
+
+	i915_gem_gtt_finish_pages(obj, pages);
+
+	for_each_sgt_page(page, sgt_iter, pages) {
+		if (page == obj->scratch.page)
+			continue;
+
+		if (obj->mm.dirty)
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+		put_page(page);
+	}
+	obj->mm.dirty = false;
+
+	sg_free_table(pages);
+	kfree(pages);
+}
+
+static void
+i915_gem_scratch_release(struct drm_i915_gem_object *obj)
+{
+	struct i915_gem_scratch_link *link, *n;
+
+	rbtree_postorder_for_each_entry_safe(link, n,
+					     &obj->scratch.links.rb_root,
+					     it.rb) {
+		i915_gem_object_put(link->target);
+		kfree(link);
+	}
+
+	put_page(obj->scratch.page);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_scratch_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+		 I915_GEM_OBJECT_IS_SHRINKABLE,
+	.get_pages = i915_gem_scratch_get_pages,
+	.put_pages = i915_gem_scratch_put_pages,
+	.release = i915_gem_scratch_release,
+};
+
+struct drm_i915_gem_object *
+i915_gem_object_create_scratch(struct drm_i915_private *i915, u64 size)
+{
+	struct drm_i915_gem_object *obj;
+
+	if (overflows_type(size, obj->base.size))
+		return ERR_PTR(-E2BIG);
+
+	obj = i915_gem_object_alloc(i915);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(&i915->drm, &obj->base, size);
+	i915_gem_object_init(obj, &i915_gem_scratch_ops);
+	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+	obj->scratch.links = RB_ROOT_CACHED;
+	obj->scratch.page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+	return obj;
+}
+
+static int split_link(struct drm_i915_gem_object *dst,
+		      struct i915_gem_scratch_link *link,
+		      u64 start, u64 last)
+{
+	if (start <= link->it.start && last >= link->it.last) {
+		i915_gem_object_put(link->target);
+		kfree(link);
+	} else if (start > link->it.start && last < link->it.last) {
+		struct i915_gem_scratch_link old = *link;
+
+		link->it.last = start - 1;
+		GEM_BUG_ON(link->it.last < link->it.start);
+		interval_tree_insert(&link->it, &dst->scratch.links);
+
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link)
+			return -ENOMEM;
+
+		link->it.start = last + 1;
+		link->it.last = old.it.last;
+		GEM_BUG_ON(link->it.last < link->it.start);
+		interval_tree_insert(&link->it, &dst->scratch.links);
+
+		link->target = i915_gem_object_get(old.target);
+		link->target_index = old.target_index;
+		link->target_index += link->it.start - old.it.start;
+	} else if (start > link->it.start) {
+		GEM_BUG_ON(link->it.last > last);
+
+		link->it.last = start - 1;
+		GEM_BUG_ON(link->it.last < link->it.start);
+		interval_tree_insert(&link->it, &dst->scratch.links);
+	} else {
+		u64 old_it_start = link->it.start;
+
+		GEM_BUG_ON(link->it.start < start);
+
+		link->it.start = last + 1;
+		GEM_BUG_ON(link->it.last < link->it.start);
+		interval_tree_insert(&link->it, &dst->scratch.links);
+		link->target_index += link->it.start - old_it_start;
+	}
+
+	return 0;
+}
+
+int i915_gem_scratch_set_pages(struct drm_i915_gem_object *dst,
+			       u64 dst_offset, u64 dst_stride,
+			       struct drm_i915_gem_object *src,
+			       u64 src_offset, u64 src_stride,
+			       u64 width, u64 height)
+{
+	struct drm_i915_private *i915 = to_i915(dst->base.dev);
+	int err;
+
+	GEM_BUG_ON(width == 0 || height == 0);
+
+	if (dst->ops != &i915_gem_scratch_ops)
+		return -EINVAL;
+
+	if (src) {
+		if (src->ops == &i915_gem_scratch_ops)
+			return -EINVAL;
+
+		if (!i915_gem_object_has_struct_page(src))
+			return -EINVAL;
+	}
+
+	/*
+	 * Invalidate the existing instantation of dst->mm.pages, setting
+	 * dst->mm.page = NULL and forcing all users to reacquire them,
+	 * thereby calling get_pages and acquiring the new page tree.
+	 */
+	mutex_lock(&dst->mm.lock);
+	if (i915_gem_object_has_pages(dst)) {
+		mutex_unlock(&dst->mm.lock);
+
+		err = i915_gem_object_wait(dst,
+					   I915_WAIT_INTERRUPTIBLE |
+					   I915_WAIT_ALL,
+					   MAX_SCHEDULE_TIMEOUT,
+					   NULL);
+		if (err)
+			return err;
+
+		err = mutex_lock_interruptible(&i915->drm.struct_mutex);
+		if (err)
+			return err;
+
+		err = i915_gem_object_unbind(dst);
+		if (err) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return err;
+		}
+
+		__i915_gem_object_put_pages(dst, I915_MM_NORMAL);
+
+		/* Don't talk to me about fairness. */
+		mutex_lock(&dst->mm.lock);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		/* We lost the race; don't ask. */
+		if (i915_gem_object_has_pages(dst)) {
+			mutex_unlock(&dst->mm.lock);
+			return -EBUSY;
+		}
+	}
+
+	if (dst_stride == width && src_stride == width) {
+		width *= height;
+		height = 1;
+	}
+
+	GEM_BUG_ON(i915_gem_object_has_pages(dst));
+	while (height--) {
+		struct i915_gem_scratch_link *link;
+		struct interval_tree_node *it;
+		u64 start = dst_offset;
+		u64 last = start + width - 1;
+
+		GEM_BUG_ON(overflows_type(start, link->it.start));
+		GEM_BUG_ON(overflows_type(last, link->it.last));
+
+		GEM_BUG_ON(dst_offset + width > dst->base.size >> PAGE_SHIFT);
+
+		it = interval_tree_iter_first(&dst->scratch.links, start, last);
+		if (it) {
+			struct i915_gem_scratch_link *next;
+			LIST_HEAD(list);
+
+			do {
+				link = container_of(it, typeof(*link), it);
+				list_add(&link->link, &list);
+			} while ((it = interval_tree_iter_next(it, start, last)));
+
+			list_for_each_entry_safe(link, next, &list, link) {
+				interval_tree_remove(&link->it,
+						     &dst->scratch.links);
+
+				err = split_link(dst, link, start, last);
+				if (err)
+					goto unlock;
+			}
+		}
+
+		if (src) {
+			GEM_BUG_ON(src_offset + width > src->base.size >> PAGE_SHIFT);
+
+			link = kmalloc(sizeof(*link), GFP_KERNEL);
+			if (!link) {
+				err = -ENOMEM;
+				goto unlock;
+			}
+
+			link->it.start = start;
+			link->it.last = last;
+			GEM_BUG_ON(link->it.last < link->it.start);
+			interval_tree_insert(&link->it, &dst->scratch.links);
+
+			link->target = i915_gem_object_get(src);
+			link->target_index = src_offset;
+		}
+
+		dst_offset += dst_stride;
+		src_offset += src_stride;
+	}
+
+unlock:
+	mutex_unlock(&dst->mm.lock);
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_scratch.c"
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index a2632df39173..bba83ce0b601 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -27,7 +27,7 @@
 static void huge_free_pages(struct drm_i915_gem_object *obj,
 			    struct sg_table *pages)
 {
-	unsigned long nreal = obj->scratch / PAGE_SIZE;
+	unsigned long nreal = obj->datum / PAGE_SIZE;
 	struct scatterlist *sg;
 
 	for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
@@ -40,7 +40,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
 static int huge_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
-	const unsigned long nreal = obj->scratch / PAGE_SIZE;
+	const unsigned long nreal = obj->datum / PAGE_SIZE;
 	const unsigned long npages = obj->base.size / PAGE_SIZE;
 	struct scatterlist *sg, *src, *end;
 	struct sg_table *pages;
@@ -133,7 +133,7 @@ huge_gem_object(struct drm_i915_private *i915,
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
-	obj->scratch = phys_size;
+	obj->datum = phys_size;
 
 	return obj;
 }
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
index a6133a9e8029..f7a1f06b8a7f 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.h
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
@@ -33,7 +33,7 @@ huge_gem_object(struct drm_i915_private *i915,
 static inline phys_addr_t
 huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
 {
-	return obj->scratch;
+	return obj->datum;
 }
 
 static inline dma_addr_t
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index f32aa6bb79e2..a5a0d299d6bb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -200,7 +200,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 				 const struct tile *tile,
 				 unsigned long end_time)
 {
-	const unsigned int nreal = obj->scratch / PAGE_SIZE;
+	const unsigned int nreal = obj->datum / PAGE_SIZE;
 	const unsigned long npages = obj->base.size / PAGE_SIZE;
 	struct i915_vma *vma;
 	unsigned long page;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_scratch.c b/drivers/gpu/drm/i915/selftests/i915_gem_scratch.c
new file mode 100644
index 000000000000..4ea7ac939353
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_scratch.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_gem_device.h"
+#include "huge_gem_object.h"
+
+static int assert_page(struct drm_i915_gem_object *obj,
+		       unsigned long index,
+		       struct page *page)
+{
+	int err;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("%pS: Failed to pin pages for scratch object, err=%d\n",
+		       __builtin_return_address(0), err);
+		return err;
+	}
+
+	if (i915_gem_object_get_page(obj, index) != page) {
+		pr_err("%pS: page[%lu]=%p did not point to the desired page=%p (scratch.page=%p)\n",
+		       __builtin_return_address(0), index,
+		       i915_gem_object_get_page(obj, index), page,
+		       obj->scratch.page);
+		err = -EINVAL;
+	}
+
+	i915_gem_object_unpin_pages(obj);
+	return err;
+}
+
+static __always_inline int
+assert_scratch_page(struct drm_i915_gem_object *obj, unsigned long index)
+{
+	return assert_page(obj, index, obj->scratch.page);
+}
+
+static int igt_gem_scratch_empty(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	/* Basic test to ensure we can create an object */
+
+	obj = i915_gem_object_create_scratch(i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		pr_err("create-scratch failed, err=%d\n", err);
+		goto out;
+	}
+
+	err = assert_scratch_page(obj, 0);
+	i915_gem_object_put(obj);
+out:
+	return err;
+}
+
+static int igt_gem_scratch_linked(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj, *src;
+	struct page *page;
+	int err;
+
+	/* Basic test to ensure we can create a linked object */
+
+	obj = i915_gem_object_create_scratch(i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		pr_err("create-scratch failed, err=%d\n", err);
+		goto out;
+	}
+
+	err = assert_scratch_page(obj, 0);
+	if (err)
+		goto out_obj;
+
+	src = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(src)) {
+		err = PTR_ERR(obj);
+		pr_err("create-internal failed, err=%d\n", err);
+		goto out_obj;
+	}
+
+	err = i915_gem_object_pin_pages(src);
+	if (err) {
+		pr_err("Failed to pin source pages\n");
+		i915_gem_object_put(src);
+		goto out_obj;
+	}
+	page = i915_gem_object_get_page(src, 0);
+
+	err = i915_gem_scratch_set_pages(obj, 0, 0, src, 0, 0, 1, 1);
+	if (err == 0)
+		err = assert_page(obj, 0, page);
+
+	i915_gem_object_unpin_pages(src);
+	i915_gem_object_put(src);
+out_obj:
+	i915_gem_object_put(obj);
+out:
+	return err;
+}
+
+static int igt_gem_scratch_clear(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj, *src;
+	struct page *page;
+	int err;
+
+	obj = i915_gem_object_create_scratch(i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		pr_err("create-scratch failed, err=%d\n", err);
+		goto out;
+	}
+
+	err = assert_scratch_page(obj, 0);
+	if (err)
+		goto out_obj;
+
+	src = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(src)) {
+		err = PTR_ERR(obj);
+		pr_err("create-internal failed, err=%d\n", err);
+		goto out_obj;
+	}
+
+	err = i915_gem_object_pin_pages(src);
+	if (err) {
+		pr_err("Failed to pin source pages\n");
+		i915_gem_object_put(src);
+		goto out_obj;
+	}
+	page = i915_gem_object_get_page(src, 0);
+
+	err = i915_gem_scratch_set_pages(obj, 0, 0, src, 0, 0, 1, 1);
+	if (err == 0)
+		err = assert_page(obj, 0, page);
+
+	i915_gem_object_unpin_pages(src);
+	i915_gem_object_put(src);
+	if (err)
+		goto out_obj;
+
+	err = i915_gem_scratch_set_pages(obj, 0, 0, NULL, 0, 0, 1, 1);
+	if (err) {
+		pr_err("Failed to unlink pages, err=%d\n", err);
+		goto out_obj;
+	}
+
+	err = assert_scratch_page(obj, 0);
+
+out_obj:
+	i915_gem_object_put(obj);
+out:
+	return err;
+}
+
+static int igt_gem_scratch_1d(void *arg)
+{
+	const unsigned long npages = 1024;
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj, *src;
+	unsigned long index;
+	int err;
+
+	obj = i915_gem_object_create_scratch(i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		pr_err("create-scratch failed, err=%d\n", err);
+		goto out;
+	}
+
+	src = i915_gem_object_create_internal(i915, npages * PAGE_SIZE);
+	if (IS_ERR(src)) {
+		err = PTR_ERR(obj);
+		pr_err("create-internal failed, err=%d\n", err);
+		goto out_obj;
+	}
+
+	err = i915_gem_object_pin_pages(src);
+	if (err) {
+		pr_err("Failed to pin pages for target object, err=%d\n", err);
+		goto out_src;
+	}
+
+	for (index = 0; err == 0 && index < npages; index++) {
+		err = i915_gem_scratch_set_pages(obj, 0, 0,
+						 src, index, 0,
+						 1, 1);
+		if (err) {
+			pr_err("Failed to link pages, err=%d\n", err);
+			goto out_src_unpin;
+		}
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err) {
+			pr_err("Failed to pin pages for linked scratch object, err=%d\n", err);
+			goto out_src_unpin;
+		}
+
+		if (i915_gem_object_get_page(obj, 0) == obj->scratch.page) {
+			pr_err("Returned scratch page and not linked page!\n");
+			err = -EINVAL;
+			goto out_unpin;
+		}
+
+		if (i915_gem_object_get_page(obj, 0) !=
+		    i915_gem_object_get_page(src, index)) {
+			pr_err("Unexpected linked page found|\n");
+			err = -EINVAL;
+			goto out_unpin;
+		}
+out_unpin:
+		i915_gem_object_unpin_pages(obj);
+	}
+
+out_src_unpin:
+	i915_gem_object_unpin_pages(src);
+out_src:
+	i915_gem_object_put(src);
+out_obj:
+	i915_gem_object_put(obj);
+out:
+	return err;
+}
+
+static int igt_gem_scratch_single(void *arg)
+{
+	const unsigned long npages = 1024;
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj, *src;
+	unsigned long index;
+	int err;
+
+	obj = i915_gem_object_create_scratch(i915, npages * PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		pr_err("create-scratch failed, err=%d\n", err);
+		goto out;
+	}
+
+	src = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(src)) {
+		err = PTR_ERR(obj);
+		pr_err("create-internal failed, err=%d\n", err);
+		goto out_obj;
+	}
+
+	err = i915_gem_scratch_set_pages(obj, 0, 1, src, 0, 0, 1, npages);
+	i915_gem_object_put(src);
+	if (err) {
+		pr_err("Failed to link pages, err=%d\n", err);
+		goto out_obj;
+	}
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("Failed to pin pages for linked scratch object, err=%d\n", err);
+		goto out_obj;
+	}
+
+	for (index = 0; index < npages; index++) {
+		if (i915_gem_object_get_page(obj, index) == obj->scratch.page) {
+			pr_err("[%lu] Returned scratch page and not linked page!\n",
+			       index);
+			err = -EINVAL;
+			goto out_unpin;
+		}
+
+		if (i915_gem_object_get_page(obj, index) !=
+		    i915_gem_object_get_page(obj, 0)) {
+			pr_err("[%lu] Returned different page and not linked page!\n",
+			       index);
+			err = -EINVAL;
+			goto out_unpin;
+		}
+	}
+
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+out_obj:
+	i915_gem_object_put(obj);
+out:
+	return err;
+}
+
+/*
+ * Link every index of a scratch object to the single page of a source
+ * object (src_stride = 0, width = 1, height = npages), then verify that
+ * each index resolves to that one linked page rather than the default
+ * scratch page.
+ */
+static int igt_gem_scratch_repeat(void *arg)
+{
+	const unsigned long npages = 1024;
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj, *src;
+	unsigned long index;
+	int err;
+
+	obj = i915_gem_object_create_scratch(i915, npages * PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		pr_err("create-scratch failed, err=%d\n", err);
+		goto out;
+	}
+
+	src = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(src)) {
+		err = PTR_ERR(src); /* was PTR_ERR(obj): obj is valid here */
+		pr_err("create-internal failed, err=%d\n", err);
+		goto out_obj;
+	}
+
+	/* Replicate the single source page across all npages indices. */
+	err = i915_gem_scratch_set_pages(obj, 0, 1, src, 0, 0, 1, npages);
+	i915_gem_object_put(src);
+	if (err) {
+		pr_err("Failed to link pages, err=%d\n", err);
+		goto out_obj;
+	}
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("Failed to pin pages for linked scratch object, err=%d\n", err);
+		goto out_obj;
+	}
+
+	for (index = 0; index < npages; index++) {
+		/* Every index must have been redirected off the scratch page... */
+		if (i915_gem_object_get_page(obj, index) == obj->scratch.page) {
+			pr_err("[%lu] Returned scratch page and not linked page!\n",
+			       index);
+			err = -EINVAL;
+			goto out_unpin;
+		}
+
+		/* ...and all indices must share the same (repeated) page. */
+		if (i915_gem_object_get_page(obj, index) !=
+		    i915_gem_object_get_page(obj, 0)) {
+			pr_err("[%lu] Returned different page and not linked page!\n",
+			       index);
+			err = -EINVAL;
+			goto out_unpin;
+		}
+	}
+
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+out_obj:
+	i915_gem_object_put(obj);
+out:
+	return err;
+}
+
+/*
+ * Smoketest: repeatedly pick a random 2D strided rectangle and either
+ * link it to the pages of a freshly created source object or reset it
+ * back to the scratch page. Every update is mirrored in a radix tree so
+ * that after each step each page index of the proxy can be checked
+ * against the expected page.
+ */
+static int igt_gem_scratch_smoketest(void *arg)
+{
+	const unsigned long npages = 512; /* max mem usage is npages*npages */
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj, *src, *n;
+	IGT_TIMEOUT(end_time);
+	I915_RND_STATE(rnd);
+	RADIX_TREE(pages, GFP_KERNEL);
+	LIST_HEAD(source_list);
+	unsigned long index;
+	int err;
+
+	obj = i915_gem_object_create_scratch(i915, npages * PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		pr_err("create-scratch failed, err=%d\n", err);
+		goto out;
+	}
+
+	/* Shadow copy: initially every index points at the scratch page. */
+	for (index = 0; index < npages; index++)
+		radix_tree_insert(&pages, index, obj->scratch.page);
+
+	do {
+		unsigned int dst_stride, src_stride, height, width;
+		unsigned int dst_offset, src_offset;
+		unsigned int x, y;
+
+		/*
+		 * Pick strides in [1, npages]; a zero stride would divide
+		 * by zero when choosing the height below.
+		 */
+		dst_stride = i915_prandom_u32_max_state(npages, &rnd) + 1;
+		src_stride = i915_prandom_u32_max_state(npages, &rnd) + 1;
+		width = i915_prandom_u32_max_state(min(dst_stride, src_stride), &rnd) + 1;
+		height = i915_prandom_u32_max_state(npages / max(dst_stride, src_stride), &rnd) + 1;
+
+		/* Constrain the offsets so the rectangle stays inside npages. */
+		dst_offset = i915_prandom_u32_max_state(npages - dst_stride * (height - 1) - width, &rnd);
+		src_offset = i915_prandom_u32_max_state(npages - src_stride * (height - 1) - width, &rnd);
+
+		if (prandom_u32_state(&rnd) % 4 == 0) {
+			pr_debug("Inserting new object dst=(.offset=%d, .stride=%d), src=(.offset=%d, .stride=%d), size=%dx%d\n",
+				 dst_offset, dst_stride,
+				 src_offset, src_stride,
+				 width, height);
+			src = i915_gem_object_create_internal(i915, npages * PAGE_SIZE);
+			if (IS_ERR(src)) {
+				err = PTR_ERR(src); /* was PTR_ERR(obj): obj is valid here */
+				pr_err("create-internal failed, err=%d\n", err);
+				goto out_obj;
+			}
+
+			err = i915_gem_object_pin_pages(src);
+			if (err) {
+				if (err == -ENOSPC) {
+					/* Suppress the inevitable
+					 * out-of-SWIOTLB space and just bail.
+					 */
+					err = 0;
+				} else {
+					pr_err("Failed to pin source pages, err=%d\n", err);
+				}
+				i915_gem_object_put(src);
+				goto out_obj;
+			}
+			/* Mirror the linked source pages into the shadow tree. */
+			for (y = 0; y < height; y++) {
+				unsigned s = src_offset + src_stride * y;
+				unsigned d = dst_offset + dst_stride * y;
+				for (x = 0; x < width; x++) {
+					radix_tree_delete(&pages, d + x);
+					radix_tree_insert(&pages, d + x,
+							  i915_gem_object_get_page(src, s + x));
+				}
+			}
+
+			/* Keep the pages pinned to avoid migration */
+			list_add(&src->st_link, &source_list);
+		} else {
+			pr_debug("Clearing region %dx%d, offset=%d, stride=%d\n",
+				 width, height, dst_offset, dst_stride);
+			src = NULL; /* a NULL source resets the range to scratch */
+			for (y = 0; y < height; y++) {
+				unsigned d = dst_offset + dst_stride * y;
+				for (x = 0; x < width; x++) {
+					radix_tree_delete(&pages, d + x);
+					radix_tree_insert(&pages, d + x,
+							  obj->scratch.page);
+				}
+			}
+		}
+
+		err = i915_gem_scratch_set_pages(obj, dst_offset, dst_stride,
+						 src, src_offset, src_stride,
+						 width, height);
+		if (err) {
+			pr_err("Failed to link pages, err=%d\n", err);
+			goto out_obj;
+		}
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err) {
+			if (err == -ENOSPC) {
+				/* Suppress the inevitable
+				 * out-of-SWIOTLB space and just bail.
+				 */
+				err = 0;
+			} else {
+				pr_err("Failed to pin target pages, err=%d\n",
+				       err);
+			}
+			goto out_obj;
+		}
+
+		/* Check every index of the proxy against the shadow tree. */
+		for (index = 0; index < npages; index++) {
+			err = assert_page(obj, index, radix_tree_lookup(&pages, index));
+			if (err)
+				break;
+		}
+		i915_gem_object_unpin_pages(obj);
+	} while (err == 0 && !__igt_timeout(end_time, NULL));
+
+out_obj:
+	/* Sources were kept pinned (and alive) for the comparison above. */
+	list_for_each_entry_safe(src, n, &source_list, st_link) {
+		i915_gem_object_unpin_pages(src);
+		i915_gem_object_put(src);
+	}
+	for (index = 0; index < npages; index++)
+		radix_tree_delete(&pages, index);
+	i915_gem_object_put(obj);
+out:
+	return err;
+}
+
+/* Run the scratch/proxy object selftests against a mock (no-hw) device. */
+int i915_gem_scratch_mock_selftests(void)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_gem_scratch_empty),
+		SUBTEST(igt_gem_scratch_linked),
+		SUBTEST(igt_gem_scratch_clear),
+		SUBTEST(igt_gem_scratch_1d),
+		SUBTEST(igt_gem_scratch_single),
+		SUBTEST(igt_gem_scratch_repeat),
+		SUBTEST(igt_gem_scratch_smoketest),
+	};
+	struct drm_i915_private *i915 = mock_gem_device();
+	int ret;
+
+	if (!i915)
+		return -ENOMEM;
+
+	ret = i915_subtests(tests, i915);
+
+	/* Drop the mock device; the subtests hold no further references. */
+	drm_dev_unref(&i915->drm);
+	return ret;
+}
+
+int i915_gem_scratch_live_selftests(struct drm_i915_private *i915)
+{
+	/* No tests require real hardware yet; empty placeholder so the
+	 * selftest is registered in i915_live_selftests.h from the start.
+	 */
+	static const struct i915_subtest tests[] = {
+	};
+
+	return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 088f45bc6199..33fe54bacba0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -18,6 +18,7 @@ selftest(coherency, i915_gem_coherency_live_selftests)
 selftest(gtt, i915_gem_gtt_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
+selftest(scratch, i915_gem_scratch_live_selftests)
 selftest(contexts, i915_gem_context_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
 selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 19c6fce837df..7b7e243b30a5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -18,6 +18,7 @@ selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
 selftest(timelines, i915_gem_timeline_mock_selftests)
 selftest(requests, i915_gem_request_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
+selftest(scratch, i915_gem_scratch_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
 selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index f077681fc637..5f919792fb46 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -318,6 +318,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_PERF_OPEN		0x36
 #define DRM_I915_PERF_ADD_CONFIG	0x37
 #define DRM_I915_PERF_REMOVE_CONFIG	0x38
+#define DRM_I915_GEM_SET_PAGES		0x39
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -348,7 +349,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
-#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create_v2)
 #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
 #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -375,6 +376,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
 #define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_GEM_SET_PAGES	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_PAGES, struct drm_i915_gem_set_pages)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -527,6 +529,13 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
 
+/*
+ * Query the version of DRM_I915_GEM_CREATE supported.
+ * v0 - Initial version
+ * v1 - Adds flags and support for creation of scratch objects
+ */
+#define I915_PARAM_CREATE_VERSION	52
+
 typedef struct drm_i915_getparam {
 	__s32 param;
 	/*
@@ -627,6 +636,32 @@ struct drm_i915_gem_create {
 	__u32 pad;
 };
 
+struct drm_i915_gem_create_v2 {
+	/**
+	 * Requested size for the object.
+	 *
+	 * The (page-aligned) allocated size for the object will be returned.
+	 */
+	__u64 size;
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+	__u32 pad;
+
+	/**
+	 * Creation flags; see I915_GEM_CREATE_*.
+	 *
+	 * I915_GEM_CREATE_SCRATCH: create a proxy object whose every page
+	 * initially resolves to a single scratch page, to be redirected
+	 * later via DRM_IOCTL_I915_GEM_SET_PAGES.
+	 */
+	__u64 flags;
+#define I915_GEM_CREATE_SCRATCH 0x1
+};
+
 struct drm_i915_gem_pread {
 	/** Handle for the object being read. */
 	__u32 handle;
@@ -1270,6 +1298,37 @@
 	__u32 retained;
 };
 
+/**
+ * struct drm_i915_gem_set_pages - link a 2D strided rectangle of page
+ * indices in a scratch (proxy) object to the pages of another object.
+ *
+ * No data is copied; only the proxy's page lookup is redirected.
+ * NOTE(review): offsets/strides appear to be in whole pages, as in the
+ * selftests - confirm the ioctl units before finalising the ABI.
+ */
+struct drm_i915_gem_set_pages {
+	/** Handle of the scratch (proxy) object to modify. */
+	__u32 dst_handle;
+	/**
+	 * Handle of the object supplying the pages.
+	 * NOTE(review): presumably 0 resets the range back to the scratch
+	 * page, mirroring the NULL source in the selftests - confirm.
+	 */
+	__u32 src_handle;
+
+	/** First page index of the rectangle in each object. */
+	__u64 dst_offset;
+	__u64 src_offset;
+
+	/** Row pitch, in pages, in each object. */
+	__u64 dst_stride;
+	__u64 src_stride;
+
+	/** Rectangle extent, in pages. */
+	__u64 width;
+	__u64 height;
+};
+
 /* flags */
 #define I915_OVERLAY_TYPE_MASK 		0xff
 #define I915_OVERLAY_YUV_PLANAR 	0x01
-- 
2.15.1



More information about the Intel-gfx mailing list