[Intel-gfx] [PATCH 125/190] drm/i915: Track pinned VMA

Chris Wilson chris at chris-wilson.co.uk
Mon Jan 11 02:45:09 PST 2016


Treat the VMA as the primary struct responsible for tracking bindings
into the GPU's VM. That is, we want to treat the VMA returned after we
pin an object into the VM as the cookie we hold and eventually release
when unpinning. Doing so eliminates the ambiguity of pinning the object
and then having to search for the relevant VMA (and its pin) later.
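
As an illustrative sketch of the calling-convention change (abridged,
error paths trimmed; using only the helpers touched below), a typical
caller goes from

	/* before: pin returns an errno, the binding is looked up again */
	ret = i915_gem_object_ggtt_pin(obj, &view, 0, alignment, flags);
	if (ret)
		return ret;
	offset = i915_gem_obj_ggtt_offset_view(obj, &view);
	...
	i915_gem_object_ggtt_unpin_view(obj, &view);

to

	/* after: the pinned VMA is the cookie we hold and later release */
	vma = i915_gem_object_ggtt_pin(obj, &view, 0, alignment, flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
	offset = vma->node.start;
	...
	i915_vma_unpin(vma);

so unpinning acts directly on the VMA, without walking the object's
vma_list a second time.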

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c          |  80 ++++++-----
 drivers/gpu/drm/i915/i915_drv.h              |  79 +++-------
 drivers/gpu/drm/i915/i915_gem.c              | 208 ++++++---------------------
 drivers/gpu/drm/i915/i915_gem_context.c      |  56 ++++----
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  55 ++++---
 drivers/gpu/drm/i915/i915_gem_fence.c        |  61 ++++----
 drivers/gpu/drm/i915/i915_gem_gtt.c          |  62 ++++----
 drivers/gpu/drm/i915/i915_gem_gtt.h          |  14 --
 drivers/gpu/drm/i915/i915_gem_render_state.c |  30 ++--
 drivers/gpu/drm/i915/i915_gem_render_state.h |   2 +-
 drivers/gpu/drm/i915/i915_gem_request.c      |   7 +-
 drivers/gpu/drm/i915/i915_gem_request.h      |   2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c       |   2 +-
 drivers/gpu/drm/i915/i915_gem_tiling.c       |  40 +++---
 drivers/gpu/drm/i915/i915_gpu_error.c        |  52 +++----
 drivers/gpu/drm/i915/i915_guc_submission.c   |  30 ++--
 drivers/gpu/drm/i915/intel_display.c         |  64 +++++----
 drivers/gpu/drm/i915/intel_drv.h             |   8 +-
 drivers/gpu/drm/i915/intel_fbc.c             |   2 +-
 drivers/gpu/drm/i915/intel_fbdev.c           |  42 +++---
 drivers/gpu/drm/i915/intel_guc_loader.c      |  30 ++--
 drivers/gpu/drm/i915/intel_lrc.c             | 106 +++++++-------
 drivers/gpu/drm/i915/intel_overlay.c         |  50 ++++---
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 181 ++++++++++++-----------
 drivers/gpu/drm/i915/intel_ringbuffer.h      |  15 +-
 drivers/gpu/drm/i915/intel_sprite.c          |   8 +-
 26 files changed, 576 insertions(+), 710 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d186d256f467..e923dc192f54 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
+	return i915_gem_object_to_ggtt(obj, NULL) ? "g" : " ";
 }
 
 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -266,7 +266,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	u64 total_obj_size, total_gtt_size;
+	u64 total_obj_size;
 	LIST_HEAD(stolen);
 	int count, ret;
 
@@ -274,7 +274,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	total_obj_size = total_gtt_size = count = 0;
+	total_obj_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if (obj->stolen == NULL)
 			continue;
@@ -282,7 +282,6 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		list_add(&obj->obj_exec_link, &stolen);
 
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -305,8 +304,8 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
-		   count, total_obj_size, total_gtt_size);
+	seq_printf(m, "Total %d objects, %llu bytes\n",
+		   count, total_obj_size);
 	return 0;
 }
 
@@ -315,7 +314,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		size += i915_gem_obj_total_ggtt_size(obj); \
 		++count; \
 		if (obj->map_and_fenceable) { \
-			mappable_size += i915_gem_obj_ggtt_size(obj); \
+			mappable_size += obj->base.size; \
 			++mappable_count; \
 		} \
 	} \
@@ -403,10 +402,10 @@ static void print_batch_pool_stats(struct seq_file *m,
 
 #define count_vmas(list, member) do { \
 	list_for_each_entry(vma, list, member) { \
-		size += i915_gem_obj_total_ggtt_size(vma->obj); \
+		size += vma->size; \
 		++count; \
 		if (vma->obj->map_and_fenceable) { \
-			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
+			mappable_size += vma->size; \
 			++mappable_count; \
 		} \
 	} \
@@ -459,11 +458,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if (obj->fault_mappable) {
-			size += i915_gem_obj_ggtt_size(obj);
+			size += obj->base.size;
 			++count;
 		}
 		if (obj->pin_display) {
-			mappable_size += i915_gem_obj_ggtt_size(obj);
+			mappable_size += obj->base.size;
 			++mappable_count;
 		}
 		if (obj->madv == I915_MADV_DONTNEED) {
@@ -517,30 +516,29 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	u64 total_obj_size, total_gtt_size;
+	u64 total_obj_size;
 	int count, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	total_obj_size = total_gtt_size = count = 0;
+	total_obj_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
+		if (list == PINNED_LIST && !obj->pin_display)
 			continue;
 
 		seq_puts(m, "   ");
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
-		   count, total_obj_size, total_gtt_size);
+	seq_printf(m, "Total %d objects, %llu bytes\n",
+		   count, total_obj_size);
 
 	return 0;
 }
@@ -2001,40 +1999,44 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct intel_engine_cs *ring,
-			      struct drm_i915_gem_object *ctx_obj)
+			      struct intel_context *ctx)
 {
+	struct drm_i915_gem_object *obj = ctx->engine[ring->id].state;
+	struct i915_vma *vma = ctx->engine[ring->id].vma;
 	struct page *page;
-	uint32_t *reg_state;
 	int j;
-	unsigned long ggtt_offset = 0;
 
-	if (ctx_obj == NULL) {
-		seq_printf(m, "Context on %s with no gem object\n",
-			   ring->name);
+	seq_printf(m, "CONTEXT: %s\n", ring->name);
+
+	if (obj == NULL) {
+		seq_printf(m, "\tUnallocated\n\n");
 		return;
 	}
 
 	seq_printf(m, "CONTEXT: %s\n", ring->name);
-
-	if (!i915_gem_obj_ggtt_bound(ctx_obj))
+	if (vma == NULL) {
 		seq_puts(m, "\tNot bound in GGTT\n");
-	else
-		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+	} else {
+		seq_printf(m, "\tBound in GGTT at %x\n",
+			   lower_32_bits(vma->node.start));
+	}
 
-	if (i915_gem_object_get_pages(ctx_obj)) {
-		seq_puts(m, "\tFailed to get pages for context object\n");
+	if (i915_gem_object_get_pages(obj)) {
+		seq_puts(m, "\tFailed to get pages for context object\n\n");
 		return;
 	}
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	if (!WARN_ON(page == NULL)) {
-		reg_state = kmap_atomic(page);
-
+	page = i915_gem_object_get_page(obj, LRC_STATE_PN);
+	if (page != NULL) {
+		uint32_t *reg_state = kmap_atomic(page);
 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				   ggtt_offset + 4096 + (j * 4),
-				   reg_state[j], reg_state[j + 1],
-				   reg_state[j + 2], reg_state[j + 3]);
+			seq_printf(m,
+				   "\t[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   j * 4,
+				   reg_state[j],
+				   reg_state[j + 1],
+				   reg_state[j + 2],
+				   reg_state[j + 3]);
 		}
 		kunmap_atomic(reg_state);
 	}
@@ -2062,7 +2064,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		for_each_ring(ring, dev_priv, i)
-			i915_dump_lrc_obj(m, ring, ctx->engine[i].state);
+			i915_dump_lrc_obj(m, ring, ctx);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -3131,7 +3133,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		struct page *page;
 		uint64_t *seqno;
 
-		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+		page = i915_gem_object_get_page(dev_priv->semaphore_vma->obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
 		for_each_ring(ring, dev_priv, i) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 89da35105a33..6b729baf6503 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -878,12 +878,14 @@ struct intel_context {
 	/* Legacy ring buffer submission */
 	struct {
 		struct drm_i915_gem_object *rcs_state;
+		struct i915_vma *rcs_vma;
 		bool initialized;
 	} legacy_hw_ctx;
 
 	/* Execlists */
 	struct {
 		struct drm_i915_gem_object *state;
+		struct i915_vma *vma;
 		struct intel_ring *ring;
 		int pin_count;
 		bool initialised;
@@ -1705,7 +1707,7 @@ struct drm_i915_private {
 	struct pci_dev *bridge_dev;
 	struct intel_engine_cs ring[I915_NUM_RINGS];
 	struct intel_context *kernel_context;
-	struct drm_i915_gem_object *semaphore_obj;
+	struct i915_vma *semaphore_vma;
 	uint32_t last_seqno, next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
@@ -2739,7 +2741,7 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
 	__i915_vma_unpin(vma);
 }
 
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
 			 uint64_t size,
@@ -2884,12 +2886,11 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     const struct i915_ggtt_view *view);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					      const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2910,47 +2911,15 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view);
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm);
-static inline u64
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
-{
-	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view);
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm);
-
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-				struct i915_address_space *vm);
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-			  const struct i915_ggtt_view *view);
+		     struct i915_address_space *vm,
+		     const struct i915_ggtt_view *view);
 
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-				       const struct i915_ggtt_view *view);
-
-static inline struct i915_vma *
-i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
-}
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
-
-/* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+				  struct i915_address_space *vm,
+				  const struct i915_ggtt_view *view);
 
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
@@ -2959,29 +2928,21 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
-static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
-}
+/* Some GGTT VM helpers */
+#define i915_obj_to_ggtt(obj) (&(to_i915((obj)->base.dev)->gtt.base))
 
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+static inline struct i915_vma *
+i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
+			const struct i915_ggtt_view *view)
 {
-	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
+	return i915_gem_obj_to_vma(obj, i915_obj_to_ggtt(obj), view);
 }
 
-void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-				     const struct i915_ggtt_view *view);
-static inline void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+static inline unsigned long
+i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
+			    const struct i915_ggtt_view *view)
 {
-	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+	return i915_gem_object_to_ggtt(o, view)->node.start;
 }
 
 /* i915_gem_fence.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24e6e4773ac8..01c20a336c04 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -775,16 +775,18 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 			 struct drm_file *file)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_vma *vma;
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_object_ggtt_pin(obj, NULL,
-				       0, 0,
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE | PIN_NONBLOCK);
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto out;
+	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
@@ -797,7 +799,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
-	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+	offset = vma->node.start + args->offset;
 
 	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
@@ -832,7 +834,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 out_flush:
 	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	i915_vma_unpin(vma);
 out:
 	return ret;
 }
@@ -1397,6 +1399,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_ggtt_view view = i915_ggtt_view_normal;
+	struct i915_vma *ggtt;
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret = 0;
@@ -1445,9 +1448,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now pin it into the GTT if needed */
-	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
-	if (ret)
+	ggtt = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(ggtt)) {
+		ret = PTR_ERR(ggtt);
 		goto unlock;
+	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
 	if (ret)
@@ -1458,8 +1463,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	pfn = dev_priv->gtt.mappable_base +
-		i915_gem_obj_ggtt_offset_view(obj, &view);
+	pfn = dev_priv->gtt.mappable_base + ggtt->node.start;
 	pfn >>= PAGE_SHIFT;
 
 	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
@@ -1501,7 +1505,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 					    pfn + page_offset);
 	}
 unpin:
-	i915_gem_object_ggtt_unpin_view(obj, &view);
+	__i915_vma_unpin(ggtt);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 out:
@@ -3010,7 +3014,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 					    old_write_domain);
 
 	/* And bump the LRU for this access */
-	vma = i915_gem_obj_to_ggtt(obj);
+	vma = i915_gem_object_to_ggtt(obj, NULL);
 	if (vma && drm_mm_node_allocated(&vma->node) && !vma->active)
 		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
@@ -3233,11 +3237,12 @@ rpm_put:
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
  */
-int
+struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     const struct i915_ggtt_view *view)
 {
+	struct i915_vma *vma;
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
@@ -3257,19 +3262,23 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
 					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
-	if (ret)
+	if (ret) {
+		vma = ERR_PTR(ret);
 		goto err_unpin_display;
+	}
 
 	/* As the user may map the buffer once pinned in the display plane
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+	vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
 				       view->type == I915_GGTT_VIEW_NORMAL ?
 				       PIN_MAPPABLE : 0);
-	if (ret)
+	if (IS_ERR(vma))
 		goto err_unpin_display;
 
+	WARN_ON(obj->pin_display > vma->pin_count);
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3288,24 +3297,24 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	/* Increment the pages_pin_count to guard against the shrinker */
 	obj->pages_pin_count++;
 
-	return 0;
+	return vma;
 
 err_unpin_display:
 	obj->pin_display--;
-	return ret;
+	return vma;
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					 const struct i915_ggtt_view *view)
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-	if (WARN_ON(obj->pin_display == 0))
+	if (WARN_ON(vma->obj->pin_display == 0))
 		return;
 
-	i915_gem_object_ggtt_unpin_view(obj, view);
+	vma->obj->pin_display--;
+	vma->obj->pages_pin_count--;
 
-	obj->pages_pin_count--;
-	obj->pin_display--;
+	i915_vma_unpin(vma);
+	WARN_ON(vma->obj->pin_display > vma->pin_count);
 }
 
 /**
@@ -3511,26 +3520,24 @@ err:
 	return ret;
 }
 
-int
+struct i915_vma *
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
-			 const struct i915_ggtt_view *view,
+			 const struct i915_ggtt_view *ggtt_view,
 			 uint64_t size,
 			 uint64_t alignment,
 			 uint64_t flags)
 {
+	struct i915_address_space *vm = i915_obj_to_ggtt(obj);
 	struct i915_vma *vma;
 	int ret;
 
-	if (view == NULL)
-		view = &i915_ggtt_view_normal;
-
-	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, ggtt_view);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return vma;
 
 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
 		if (flags & PIN_NONBLOCK && (vma->pin_count | vma->active))
-			return -ENOSPC;
+			return ERR_PTR(-ENOSPC);
 
 		WARN(vma->pin_count,
 		     "bo is already pinned in ggtt with incorrect alignment:"
@@ -3543,17 +3550,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
-			return ret;
+			return ERR_PTR(ret);
 	}
 
-	return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
-}
+	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+	if (ret)
+		return ERR_PTR(ret);
 
-void
-i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-				const struct i915_ggtt_view *view)
-{
-	i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
+	return vma;
 }
 
 int
@@ -3824,34 +3828,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
-		    vma->vm == vm)
-			return vma;
-	}
-	return NULL;
-}
-
-struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-					   const struct i915_ggtt_view *view)
-{
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
-	struct i915_vma *vma;
-
-	if (WARN_ONCE(!view, "no view specified"))
-		return ERR_PTR(-EINVAL);
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->vm == ggtt &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma;
-	return NULL;
-}
-
 static void
 i915_gem_stop_ringbuffers(struct drm_device *dev)
 {
@@ -4329,104 +4305,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 	}
 }
 
-/* All the new VM stuff */
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-	struct i915_vma *vma;
-
-	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
-			return vma->node.start;
-	}
-
-	WARN(1, "%s vma for this object not found.\n",
-	     i915_is_ggtt(vm) ? "global" : "ppgtt");
-	return -1;
-}
-
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view)
-{
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == ggtt &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma->node.start;
-
-	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
-	return -1;
-}
-
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
-			return true;
-	}
-
-	return false;
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view)
-{
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == ggtt &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
-		    drm_mm_node_allocated(&vma->node))
-			return true;
-
-	return false;
-}
-
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-				struct i915_address_space *vm)
-{
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-	struct i915_vma *vma;
-
-	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-	BUG_ON(list_empty(&o->vma_list));
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
-			return vma->node.size;
-	}
-	return 0;
-}
-
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->pin_count > 0)
-			return true;
-
-	return false;
-}
-
 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
 struct page *
 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c54c17944796..0a5f1d5fa788 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -327,8 +327,10 @@ void i915_gem_context_reset(struct drm_device *dev)
 		struct intel_context *lctx = ring->last_context;
 
 		if (lctx) {
-			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+			if (lctx->legacy_hw_ctx.rcs_vma) {
+				i915_vma_unpin(lctx->legacy_hw_ctx.rcs_vma);
+				lctx->legacy_hw_ctx.rcs_vma = NULL;
+			}
 
 			i915_gem_context_unreference(lctx);
 			ring->last_context = NULL;
@@ -379,7 +381,7 @@ int i915_gem_context_init(struct drm_device *dev)
 
 	if (ctx->legacy_hw_ctx.rcs_state) {
 		u32 alignment = get_context_alignment(dev);
-		int ret;
+		struct i915_vma *vma;
 
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
@@ -388,13 +390,13 @@ int i915_gem_context_init(struct drm_device *dev)
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_object_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
+		vma = i915_gem_object_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
 					       NULL, 0, alignment, PIN_HIGH);
-		if (ret) {
+		if (IS_ERR(vma)) {
 			DRM_ERROR("Failed to pinned default global context (error %d)\n",
-				  ret);
+				  (int)PTR_ERR(vma));
 			i915_gem_context_unreference(ctx);
-			return ret;
+			return PTR_ERR(vma);
 		}
 	}
 
@@ -427,13 +429,13 @@ void i915_gem_context_fini(struct drm_device *dev)
 		WARN_ON(!dev_priv->ring[RCS].last_context);
 		if (dev_priv->ring[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
-			WARN_ON(i915_gem_object_is_active(dctx->legacy_hw_ctx.rcs_state));
-			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+			WARN_ON(dctx->legacy_hw_ctx.rcs_vma->active);
+			i915_vma_unpin(dctx->legacy_hw_ctx.rcs_vma);
 			i915_gem_context_unreference(dctx);
 			dev_priv->ring[RCS].last_context = NULL;
 		}
 
-		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+		i915_vma_unpin(dctx->legacy_hw_ctx.rcs_vma);
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -553,8 +555,8 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
-			flags);
+	intel_ring_emit(ring,
+			req->ctx->legacy_hw_ctx.rcs_vma->node.start | flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -666,20 +668,29 @@ static int do_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_context *from = engine->last_context;
+	struct intel_context *from;
 	u32 hw_flags = 0;
 	int ret, i;
 
-	if (should_skip_switch(engine, from, to))
+	if (should_skip_switch(engine, engine->last_context, to))
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
 	if (engine->id == RCS) {
 		u32 alignment = get_context_alignment(engine->dev);
-		ret = i915_gem_object_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+		struct i915_vma *vma;
+
+		vma = i915_gem_object_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					       NULL, 0, alignment, PIN_HIGH);
-		if (ret)
-			return ret;
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		to->legacy_hw_ctx.rcs_vma = vma;
+
+		if (WARN_ON(!(vma->bound & GLOBAL_BIND))) {
+			ret = -ENODEV;
+			goto unpin_out;
+		}
 	}
 
 	/*
@@ -790,8 +801,6 @@ static int do_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_gem_object *obj = from->legacy_hw_ctx.rcs_state;
-
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -799,11 +808,10 @@ static int do_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
-
+		i915_vma_move_to_active(from->legacy_hw_ctx.rcs_vma, req, 0);
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+		i915_vma_unpin(from->legacy_hw_ctx.rcs_vma);
+
 		i915_gem_context_unreference(from);
 	}
 
@@ -814,7 +822,7 @@ done:
 
 unpin_out:
 	if (engine->id == RCS)
-		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+		i915_vma_unpin(to->legacy_hw_ctx.rcs_vma);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a1b6678fb075..4d15dd32e365 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -174,8 +174,8 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		 * from the (obj, vm) we don't run the risk of creating
 		 * duplicated vmas for the same vm.
 		 */
-		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
-		if (IS_ERR(vma)) {
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+		if (unlikely(IS_ERR(vma))) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
 			goto err;
@@ -348,21 +348,26 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_vma *vma;
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
 	int ret;
 
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
-		return ret;
+		goto unpin;
 
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
-		return ret;
+		goto unpin;
 
 	/* Map the page containing the relocation we're going to perform.  */
-	offset = i915_gem_obj_ggtt_offset(obj);
+	offset = vma->node.start;
 	offset += reloc->offset;
 	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 					      offset & PAGE_MASK);
@@ -384,7 +389,9 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 
 	io_mapping_unmap_atomic(reloc_page);
 
-	return 0;
+unpin:
+	i915_vma_unpin(vma);
+	return ret;
 }
 
 static void
@@ -1222,7 +1229,7 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static struct i915_vma*
+static struct i915_vma *
 i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
 			  struct drm_i915_gem_object *batch_obj,
@@ -1246,31 +1253,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 			      batch_start_offset,
 			      batch_len,
 			      is_master);
-	if (ret)
+	if (ret) {
+		if (ret == -EACCES) /* unhandled chained batch */
+			vma = NULL;
+		else
+			vma = ERR_PTR(ret);
 		goto err;
+	}
 
-	ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
-	if (ret)
+	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err;
-
-	i915_gem_object_unpin_pages(shadow_batch_obj);
+	}
 
 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
-	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
 	vma->exec_entry = shadow_exec_entry;
 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	drm_gem_object_reference(&shadow_batch_obj->base);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
-	return vma;
-
 err:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
-	if (ret == -EACCES) /* unhandled chained batch */
-		return NULL;
-	else
-		return ERR_PTR(ret);
+	return vma;
 }
 
 static int
@@ -1604,6 +1610,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (dispatch_flags & I915_DISPATCH_SECURE) {
 		struct drm_i915_gem_object *obj = params->batch_vma->obj;
+		struct i915_vma *vma;
 
 		/*
 		 * So on first glance it looks freaky that we pin the batch here
@@ -1615,11 +1622,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
-		if (ret)
+		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
 			goto err;
+		}
 
-		params->batch_vma = i915_gem_obj_to_ggtt(obj);
+		params->batch_vma = vma;
 	}
 
 	/* Allocate a request for this batch buffer nice and early. */
@@ -1635,7 +1644,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
-	params->request->batch_obj = params->batch_vma->obj;
+	params->request->batch = params->batch_vma;
 
 	ret = i915_gem_request_add_to_client(params->request, file);
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index ff085efcf0e5..8ba05a0f15d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -85,20 +85,14 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	POSTING_READ(fence_reg_lo);
 
 	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint64_t val;
-
-		/* Adjust fence size to match tiled area */
-		if (obj->tiling_mode != I915_TILING_NONE) {
-			uint32_t row_size = obj->stride *
-				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-			size = (size / row_size) * row_size;
-		}
-
-		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-				 0xfffff000) << 32;
-		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
+		u32 row_size = obj->stride * (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+		u32 size = (u32)vma->node.size / row_size * row_size;
+		u64 val;
+
+		val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
+		val |= vma->node.start & 0xfffff000;
+		val |= (u64)((obj->stride / 128) - 1) << fence_pitch_shift;
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
@@ -121,15 +115,17 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 	u32 val;
 
 	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
+		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
 		int pitch_val;
 		int tile_width;
 
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+		WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
+		     !is_power_of_2(vma->node.size) ||
+		     (vma->node.start & (vma->node.size - 1)),
+		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08lx) aligned\n",
+		     (long)vma->node.start,
+		     obj->map_and_fenceable,
+		     (long)vma->node.size);
 
 		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 			tile_width = 128;
@@ -140,10 +136,10 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 		pitch_val = obj->stride / tile_width;
 		pitch_val = ffs(pitch_val) - 1;
 
-		val = i915_gem_obj_ggtt_offset(obj);
+		val = vma->node.start;
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I915_FENCE_SIZE_BITS(size);
+		val |= I915_FENCE_SIZE_BITS(vma->node.size);
 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 		val |= I830_FENCE_REG_VALID;
 	} else
@@ -160,22 +156,22 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
 	uint32_t val;
 
 	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
+		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
 		uint32_t pitch_val;
 
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), size);
+		WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
+		     !is_power_of_2(vma->node.size) ||
+		     (vma->node.start & (vma->node.size - 1)),
+		     "object 0x%08lx not 512K or pot-size 0x%08lx aligned\n",
+		     (long)vma->node.start, (long)vma->node.size);
 
 		pitch_val = obj->stride / 128;
 		pitch_val = ffs(pitch_val) - 1;
 
-		val = i915_gem_obj_ggtt_offset(obj);
+		val = vma->node.start;
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I830_FENCE_SIZE_BITS(size);
+		val |= I830_FENCE_SIZE_BITS(vma->node.size);
 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 		val |= I830_FENCE_REG_VALID;
 	} else
@@ -426,11 +422,6 @@ i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
 		return true;
 	} else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6652df57e5b0..0aadfaee2150 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3250,14 +3250,10 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 
 	GEM_BUG_ON(vm->closed);
 
-	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
-		return ERR_PTR(-EINVAL);
-
 	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
 		init_request_active(&vma->last_read[i], i915_vma_retire);
@@ -3267,55 +3263,69 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	vma->size = obj->base.size;
 	vma->is_ggtt = i915_is_ggtt(vm);
 
-	if (i915_is_ggtt(vm)) {
+	if (ggtt_view) {
 		vma->ggtt_view = *ggtt_view;
 		if (ggtt_view->type == I915_GGTT_VIEW_PARTIAL)
 			vma->size = ggtt_view->params.partial.size << PAGE_SHIFT;
 		else if (ggtt_view->type == I915_GGTT_VIEW_ROTATED)
 			vma->size = ggtt_view->params.rotation_info.size;
-	} else
+	}
+
+	if (!vma->is_ggtt)
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 
 	list_add_tail(&vma->obj_link, &obj->vma_list);
-
 	return vma;
 }
 
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
+static inline bool vma_matches(struct i915_vma *vma,
+			       struct i915_address_space *vm,
+			       const struct i915_ggtt_view *view)
 {
-	struct i915_vma *vma;
+	if (vma->vm != vm)
+		return false;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	if (!vma)
-		vma = __i915_gem_vma_create(obj, vm,
-					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+	if (!vma->is_ggtt)
+		return true;
 
-	return vma;
+	if (view == NULL)
+		return vma->ggtt_view.type == 0;
+
+	if (vma->ggtt_view.type != view->type)
+		return false;
+
+	return memcmp(&vma->ggtt_view.params,
+		      &view->params,
+		      sizeof(view->params)) == 0;
 }
 
 struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-				       const struct i915_ggtt_view *view)
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
 	struct i915_vma *vma;
 
-	if (WARN_ON(!view))
-		return ERR_PTR(-EINVAL);
+	list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
+		if (vma_matches(vma, vm, view))
+			return vma;
 
-	vma = i915_gem_obj_to_ggtt_view(obj, view);
+	return NULL;
+}
 
-	if (IS_ERR(vma))
-		return vma;
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm,
+				  const struct i915_ggtt_view *view)
+{
+	struct i915_vma *vma;
 
+	vma = i915_gem_obj_to_vma(obj, vm, view);
 	if (!vma)
-		vma = __i915_gem_vma_create(obj, ggtt, view);
+		vma = __i915_gem_vma_create(obj, vm, view);
 
 	GEM_BUG_ON(vma->closed);
 	return vma;
-
 }
 
 static struct scatterlist *
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e6f64dcb2e77..7f57dea246d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -573,18 +573,4 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-
-static inline bool
-i915_ggtt_view_equal(const struct i915_ggtt_view *a,
-                     const struct i915_ggtt_view *b)
-{
-	if (WARN_ON(!a || !b))
-		return false;
-
-	if (a->type != b->type)
-		return false;
-	if (a->type != I915_GGTT_VIEW_NORMAL)
-		return !memcmp(&a->params, &b->params, sizeof(a->params));
-	return true;
-}
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 830c0d24b11e..89b5c99bbb02 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
 struct render_state {
 	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
-	u64 ggtt_offset;
+	struct i915_vma *vma;
 	int gen;
 	u32 aux_batch_size;
 	u32 aux_batch_offset;
@@ -56,7 +56,7 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
 
 static int render_state_init(struct render_state *so, struct drm_device *dev)
 {
-	int ret;
+	struct i915_vma *vma;
 
 	so->gen = INTEL_INFO(dev)->gen;
 	so->rodata = render_state_get_rodata(dev, so->gen);
@@ -70,16 +70,14 @@ static int render_state_init(struct render_state *so, struct drm_device *dev)
 	if (so->obj == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_object_ggtt_pin(so->obj, NULL, 0, 0, 0);
-	if (ret)
-		goto free_gem;
+	vma = i915_gem_object_ggtt_pin(so->obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma)) {
+		drm_gem_object_unreference(&so->obj->base);
+		return PTR_ERR(vma);
+	}
 
-	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+	so->vma = vma;
 	return 0;
-
-free_gem:
-	drm_gem_object_unreference(&so->obj->base);
-	return ret;
 }
 
 /*
@@ -119,7 +117,7 @@ static int render_state_setup(struct render_state *so)
 		u32 s = rodata->batch[i];
 
 		if (i * 4  == rodata->reloc[reloc_index]) {
-			u64 r = s + so->ggtt_offset;
+			u64 r = s + so->vma->node.start;
 			s = lower_32_bits(r);
 			if (so->gen >= 8) {
 				if (i + 1 >= rodata->batch_items ||
@@ -174,7 +172,7 @@ err_out:
 
 static void render_state_fini(struct render_state *so)
 {
-	i915_gem_object_ggtt_unpin(so->obj);
+	i915_vma_unpin(so->vma);
 	drm_gem_object_unreference(&so->obj->base);
 }
 
@@ -207,14 +205,14 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	struct render_state so;
 	int ret;
 
-	ret = render_state_prepare(req->engine, &so);
+	ret = render_state_prepare(req->engine, memset(&so, 0, sizeof(so)));
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
+	ret = req->engine->emit_bb_start(req, so.vma->node.start,
 					 so.rodata->batch_items * 4,
 					 I915_DISPATCH_SECURE);
 	if (ret)
@@ -222,7 +220,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 
 	if (so.aux_batch_size > 8) {
 		ret = req->engine->emit_bb_start(req,
-						 (so.ggtt_offset +
+						 (so.vma->node.start +
 						  so.aux_batch_offset),
 						 so.aux_batch_size,
 						 I915_DISPATCH_SECURE);
@@ -230,7 +228,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 			goto out;
 	}
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
+	i915_vma_move_to_active(so.vma, req, 0);
 out:
 	render_state_fini(&so);
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index c44fca8599bb..18cce3f06e9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -24,7 +24,7 @@
 #ifndef _I915_GEM_RENDER_STATE_H_
 #define _I915_GEM_RENDER_STATE_H_
 
-#include <linux/types.h>
+struct drm_i915_gem_request;
 
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 1886048f0acd..4ebe4b7e02d0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -420,15 +420,10 @@ static void i915_gem_mark_busy(struct drm_i915_private *dev_priv)
  */
 void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
-	struct intel_ring *ring;
+	struct intel_ring *ring = request->ring;
 	u32 request_start;
 	int ret;
 
-	if (WARN_ON(request == NULL))
-		return;
-
-	ring = request->ring;
-
 	/*
 	 * To ensure that this call will not fail, space for its emissions
 	 * should already have been reserved in the ring buffer. Let the ring
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 4b38cd731124..2294234b4bf5 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -84,7 +84,7 @@ struct drm_i915_gem_request {
 
 	/** Batch buffer related to this request if any (used for
 	    error state dump only) */
-	struct drm_i915_gem_object *batch_obj;
+	struct i915_vma *batch;
 	struct list_head active_list;
 
 	/** Time at which this request was emitted, in jiffies. */
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index c110563823bd..401fa603b3e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -670,7 +670,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
-	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 387246f19ce2..f83cb4329c8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -114,33 +114,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 }
 
 /* Is the current GTT allocation valid for the change in tiling? */
-static bool
+static int
 i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 {
+	struct i915_vma *vma;
 	u32 size;
 
 	if (tiling_mode == I915_TILING_NONE)
-		return true;
+		return 0;
 
 	if (INTEL_INFO(obj->base.dev)->gen >= 4)
-		return true;
+		return 0;
+
+	vma = i915_gem_object_to_ggtt(obj, NULL);
+	if (vma == NULL)
+		return 0;
+
+	if (!obj->map_and_fenceable)
+		return 0;
 
 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
-		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
-			return false;
+		if (vma->node.start & ~I915_FENCE_START_MASK)
+			goto bad;
 	} else {
-		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
-			return false;
+		if (vma->node.start & ~I830_FENCE_START_MASK)
+			goto bad;
 	}
 
 	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-	if (i915_gem_obj_ggtt_size(obj) != size)
-		return false;
+	if (vma->node.size < size)
+		goto bad;
 
-	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
-		return false;
+	if (vma->node.start & (size - 1))
+		goto bad;
 
-	return true;
+	return 0;
+
+bad:
+	return i915_vma_unbind(vma);
 }
 
 /**
@@ -227,10 +238,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		 * has to also include the unfenced register the GPU uses
 		 * whilst executing a fenced command for an untiled object.
 		 */
-		if (obj->map_and_fenceable &&
-		    !i915_gem_object_fence_ok(obj, args->tiling_mode))
-			ret = i915_gem_object_ggtt_unbind(obj);
-
+		ret = i915_gem_object_fence_ok(obj, args->tiling_mode);
 		if (ret == 0) {
 			if (obj->pages &&
 			    obj->madv == I915_MADV_WILLNEED &&
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9a18fc502145..7fe9281bf37e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -593,17 +593,20 @@ static void i915_error_state_free(struct kref *error_ref)
 
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_i915_private *dev_priv,
-			 struct drm_i915_gem_object *src,
-			 struct i915_address_space *vm)
+			 struct i915_vma *vma)
 {
+	struct drm_i915_gem_object *src;
 	struct drm_i915_error_object *dst;
-	struct i915_vma *vma = NULL;
 	int num_pages;
 	bool use_ggtt;
 	int i = 0;
 	u64 reloc_offset;
 
-	if (src == NULL || src->pages == NULL)
+	if (vma == NULL)
+		return NULL;
+
+	src = vma->obj;
+	if (src->pages == NULL)
 		return NULL;
 
 	num_pages = src->base.size >> PAGE_SHIFT;
@@ -612,26 +615,19 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 	if (dst == NULL)
 		return NULL;
 
-	if (i915_gem_obj_bound(src, vm))
-		dst->gtt_offset = i915_gem_obj_offset(src, vm);
-	else
-		dst->gtt_offset = -1;
-
-	reloc_offset = dst->gtt_offset;
-	if (i915_is_ggtt(vm))
-		vma = i915_gem_obj_to_ggtt(src);
+	reloc_offset = dst->gtt_offset = vma->node.start;
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		   vma && (vma->bound & GLOBAL_BIND) &&
+		   (vma->bound & GLOBAL_BIND) &&
 		   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
 		use_ggtt = true;
 
-		if (!(vma && vma->bound & GLOBAL_BIND))
+		if (!(vma->bound & GLOBAL_BIND))
 			goto unwind;
 
-		reloc_offset = i915_gem_obj_ggtt_offset(src);
+		reloc_offset = vma->node.start;
 		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
 			goto unwind;
 	}
@@ -690,8 +686,6 @@ unwind:
 	kfree(dst);
 	return NULL;
 }
-#define i915_error_ggtt_object_create(dev_priv, src) \
-	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
 
 static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
@@ -798,10 +792,10 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 	if (!i915.semaphores)
 		return;
 
-	if (!error->semaphore_obj)
+	if (!error->semaphore_obj && dev_priv->semaphore_vma)
 		error->semaphore_obj =
-			i915_error_ggtt_object_create(dev_priv,
-						      dev_priv->semaphore_obj);
+			i915_error_object_create(dev_priv,
+						 dev_priv->semaphore_vma);
 
 	for_each_ring(to, dev_priv, i) {
 		int idx;
@@ -949,9 +943,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
 
 	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, vm_link) {
 		if ((error->ccid & PAGE_MASK) == vma->node.start) {
-			ering->ctx = i915_error_object_create(dev_priv,
-							      vma->obj,
-							      vma->vm);
+			ering->ctx = i915_error_object_create(dev_priv, vma);
 			break;
 		}
 	}
@@ -992,13 +984,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			 */
 			error->ring[i].batchbuffer =
 				i915_error_object_create(dev_priv,
-							 request->batch_obj,
-							 vm);
+							 request->batch);
 
 			if (HAS_BROKEN_CS_TLB(dev_priv))
 				error->ring[i].wa_batchbuffer =
-					i915_error_ggtt_object_create(dev_priv,
-								      engine->scratch.obj);
+					i915_error_object_create(dev_priv,
+								 engine->scratch.vma);
 
 			if (request->pid) {
 				struct task_struct *task;
@@ -1018,13 +1009,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			error->ring[i].cpu_ring_head = ring->head;
 			error->ring[i].cpu_ring_tail = ring->tail;
 			error->ring[i].ringbuffer =
-				i915_error_ggtt_object_create(dev_priv,
-							      ring->obj);
+				i915_error_object_create(dev_priv, ring->vma);
 		}
 
 		error->ring[i].hws_page =
-			i915_error_ggtt_object_create(dev_priv,
-						      engine->status_page.obj);
+			i915_error_object_create(dev_priv,
+						 engine->status_page.vma);
 
 		i915_gem_record_active_context(engine, error, &error->ring[i]);
 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index baa5c34757ba..d6df94129796 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -392,7 +392,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		struct guc_execlist_context *lrc = &desc.lrc[i];
 		struct intel_ring *ring = ctx->engine[i].ring;
 		struct intel_engine_cs *engine;
-		struct drm_i915_gem_object *obj;
 
 		/* TODO: We have a design issue to be solved here. Only when we
 		 * receive the first batch, we know which engine is used by the
@@ -401,23 +400,20 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		obj = ctx->engine[i].state;
-		if (!obj)
+		if (ctx->engine[i].state == NULL)
 			break;	/* XXX: continue? */
 
 		engine = ring->engine;
 		lrc->context_desc = engine->execlist_context_descriptor;
 
 		/* The state page is after PPHWSP */
-		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
+		lrc->ring_lcra = ctx->engine[i].vma->node.start +
 				LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(engine->id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ring->obj;
-
-		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
-		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
+		lrc->ring_begin = ring->vma->node.start;
+		lrc->ring_end = lrc->ring_begin + ring->size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
 
@@ -496,7 +492,7 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
 
 		if (timeout_counter)
 			usleep_range(1000, 2000);
-	};
+	}
 
 	kunmap_atomic(base);
 
@@ -611,25 +607,25 @@ int i915_guc_submit(struct i915_guc_client *client,
  */
 static struct i915_vma *guc_allocate_vma(struct drm_device *dev, u32 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	int ret;
+	struct i915_vma *vma;
 
 	obj = i915_gem_alloc_object(dev, size);
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
-	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
 				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-	if (ret) {
+	if (IS_ERR(vma)) {
 		drm_gem_object_unreference(&obj->base);
-		return ERR_PTR(ret);
+		return vma;
 	}
 
 	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
 	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
-	return i915_gem_obj_to_ggtt(obj);
+	return vma;
 }
 
 /**
@@ -991,7 +987,7 @@ int intel_guc_suspend(struct drm_device *dev)
 	/* any value greater than GUC_POWER_D0 */
 	data[1] = GUC_POWER_D1;
 	/* first page is shared data with GuC */
-	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = ctx->engine[RCS].vma->node.start;
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
@@ -1016,7 +1012,7 @@ int intel_guc_resume(struct drm_device *dev)
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
 	/* first page is shared data with GuC */
-	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = ctx->engine[RCS].vma->node.start;
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e8f957785a64..313f1fb144b9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2322,7 +2322,7 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
 		return 0;
 }
 
-int
+struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			   struct drm_framebuffer *fb,
 			   const struct drm_plane_state *plane_state)
@@ -2331,6 +2331,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
+	struct i915_vma *vma;
 	u32 alignment;
 	int ret;
 
@@ -2352,12 +2353,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	case I915_FORMAT_MOD_Yf_TILED:
 		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
 			  "Y tiling bo slipped through, driver bug!\n"))
-			return -EINVAL;
+			return ERR_PTR(-ENODEV);
 		alignment = 1 * 1024 * 1024;
 		break;
 	default:
 		MISSING_CASE(fb->modifier[0]);
-		return -EINVAL;
+		return ERR_PTR(-ENODEV);
 	}
 
 	intel_fill_fb_ggtt_view(&view, fb, plane_state);
@@ -2379,10 +2380,11 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	 */
 	intel_runtime_pm_get(dev_priv);
 
-	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
-						   &view);
-	if (ret)
+	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err_pm;
+	}
 
 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
 	 * fence, whereas 965+ only requires a fence if using
@@ -2409,29 +2411,31 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	}
 
 	intel_runtime_pm_put(dev_priv);
-	return 0;
+	return vma;
 
 err_unpin:
-	i915_gem_object_unpin_from_display_plane(obj, &view);
+	i915_gem_object_unpin_from_display_plane(vma);
 err_pm:
 	intel_runtime_pm_put(dev_priv);
-	return ret;
+	return ERR_PTR(ret);
 }
 
 static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
-			       const struct drm_plane_state *plane_state)
+			       const struct drm_plane_state *state)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
+	struct i915_vma *vma;
 
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
-	intel_fill_fb_ggtt_view(&view, fb, plane_state);
+	intel_fill_fb_ggtt_view(&view, fb, state);
 
 	if (view.type == I915_GGTT_VIEW_NORMAL)
 		i915_gem_object_unpin_fence(obj);
 
-	i915_gem_object_unpin_from_display_plane(obj, &view);
+	vma = i915_gem_object_to_ggtt(obj, &view);
+	i915_gem_object_unpin_from_display_plane(vma);
 }
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2628,7 +2632,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 
 		obj = intel_fb_obj(fb);
-		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+		if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
 			drm_framebuffer_reference(fb);
 			goto valid_fb;
 		}
@@ -2788,11 +2792,11 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_INFO(dev)->gen >= 4) {
 		I915_WRITE(DSPSURF(plane),
-			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+			   i915_gem_object_ggtt_offset(obj, NULL) + intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPLINOFF(plane), linear_offset);
 	} else
-		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
+		I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
 	POSTING_READ(reg);
 }
 
@@ -2893,7 +2897,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	I915_WRITE(DSPSURF(plane),
-		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+		   i915_gem_object_ggtt_offset(obj, NULL) + intel_crtc->dspaddr_offset);
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
 	} else {
@@ -2948,7 +2952,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
 	intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
 				intel_plane->base.state);
 
-	vma = i915_gem_obj_to_ggtt_view(obj, &view);
+	vma = i915_gem_object_to_ggtt(obj, &view);
 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
 		view.type))
 		return -1;
@@ -11562,6 +11566,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_engine_cs *ring;
 	bool mmio_flip;
 	struct drm_i915_gem_request *request = NULL;
+	struct i915_vma *vma;
 	int ret;
 
 	/*
@@ -11683,13 +11688,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			goto cleanup_request;
 	}
 
-	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+	vma = intel_pin_and_fence_fb_obj(crtc->primary, fb,
 					 crtc->primary->state);
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto cleanup_request;
+	}
 
-	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
-						  obj, 0);
+	work->gtt_offset = vma->node.start;
 	work->gtt_offset += intel_crtc->dspaddr_offset;
 
 	if (mmio_flip) {
@@ -13889,7 +13895,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		if (ret)
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 	} else {
-		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
+		struct i915_vma *vma;
+
+		vma = intel_pin_and_fence_fb_obj(plane, fb, new_state);
+
+		if (IS_ERR(vma))
+			ret = PTR_ERR(vma);
 	}
 
 	if (ret == 0) {
@@ -14229,7 +14240,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
 	if (!obj)
 		addr = 0;
 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
-		addr = i915_gem_obj_ggtt_offset(obj);
+		addr = i915_gem_object_ggtt_offset(obj, NULL);
 	else
 		addr = obj->phys_handle->busaddr;
 
@@ -16019,7 +16030,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
 {
 	struct drm_crtc *c;
 	struct drm_i915_gem_object *obj;
-	int ret;
 
 	mutex_lock(&dev->struct_mutex);
 	intel_init_gt_powersave(dev);
@@ -16035,16 +16045,18 @@ void intel_modeset_gem_init(struct drm_device *dev)
 	 * for this.
 	 */
 	for_each_crtc(dev, c) {
+		struct i915_vma *vma;
+
 		obj = intel_fb_obj(c->primary->fb);
 		if (obj == NULL)
 			continue;
 
 		mutex_lock(&dev->struct_mutex);
-		ret = intel_pin_and_fence_fb_obj(c->primary,
+		vma = intel_pin_and_fence_fb_obj(c->primary,
 						 c->primary->fb,
 						 c->primary->state);
 		mutex_unlock(&dev->struct_mutex);
-		if (ret) {
+		if (IS_ERR(vma)) {
 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
 				  to_intel_crtc(c)->pipe);
 			drm_framebuffer_unreference(c->primary->fb);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 41e2e1c4d052..d33aebd2ed4e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -123,6 +123,7 @@ struct intel_framebuffer {
 struct intel_fbdev {
 	struct drm_fb_helper helper;
 	struct intel_framebuffer *fb;
+	struct i915_vma *vma;
 	int preferred_bpp;
 };
 
@@ -1149,9 +1150,10 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct intel_load_detect_pipe *old,
 				    struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
-			       struct drm_framebuffer *fb,
-			       const struct drm_plane_state *plane_state);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+			   struct drm_framebuffer *fb,
+			   const struct drm_plane_state *plane_state);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index a1988a486b92..8d8f1ce7f1ae 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -263,7 +263,7 @@ static void ilk_fbc_activate(struct intel_crtc *crtc)
 
 	y_offset = get_crtc_fence_y_offset(crtc);
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
-	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_object_ggtt_offset(obj, NULL) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 09840f4380f9..7decbca25dbb 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -184,9 +184,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
-	struct drm_i915_gem_object *obj;
-	int size, ret;
+	struct i915_vma *vma;
 	bool prealloc = false;
+	int ret;
 
 	if (intel_fb &&
 	    (sizes->fb_width > intel_fb->base.width ||
@@ -211,18 +211,17 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		sizes->fb_height = intel_fb->base.height;
 	}
 
-	obj = intel_fb->obj;
-	size = obj->base.size;
-
 	mutex_lock(&dev->struct_mutex);
 
 	/* Pin the GGTT vma for our access via info->screen_base.
 	 * This also validates that any existing fb inherited from the
 	 * BIOS is suitable for own access.
 	 */
-	ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
-	if (ret)
+	vma = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto out_unlock;
+	}
 
 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
@@ -246,18 +245,19 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
-	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
-	info->fix.smem_len = size;
+	info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+	info->fix.smem_len = vma->node.size;
 
 	info->screen_base =
-		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   size);
+		ioremap_wc(dev_priv->gtt.mappable_base + vma->node.start,
+			   vma->node.size);
 	if (!info->screen_base) {
 		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
 		ret = -ENOSPC;
 		goto out_destroy_fbi;
 	}
-	info->screen_size = size;
+	info->screen_size = vma->node.size;
+	ifbdev->vma = vma;
 
 	/* This driver doesn't need a VT switch to restore the mode on resume */
 	info->skip_vt_switch = true;
@@ -269,14 +269,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	 * If the object is stolen however, it will be full of whatever
 	 * garbage was left in there.
 	 */
-	if (ifbdev->fb->obj->stolen && !prealloc)
+	if (intel_fb->obj->stolen && !prealloc)
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
-		      fb->width, fb->height,
-		      i915_gem_obj_ggtt_offset(obj), obj);
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx\n",
+		      fb->width, fb->height, vma->node.start);
 
 	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -285,7 +284,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	i915_gem_object_unpin_fence(vma->obj);
+	i915_gem_object_unpin_from_display_plane(vma);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -524,10 +524,10 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 static void intel_fbdev_destroy(struct drm_device *dev,
 				struct intel_fbdev *ifbdev)
 {
-	/* We rely on the object-free to release the VMA pinning for
-	 * the info->screen_base mmaping. Leaking the VMA is simpler than
-	 * trying to rectify all the possible error paths leading here.
-	 */
+	if (ifbdev->vma) {
+		i915_gem_object_unpin_fence(ifbdev->vma->obj);
+		i915_gem_object_unpin_from_display_plane(ifbdev->vma);
+	}
 
 	drm_fb_helper_unregister_fbi(&ifbdev->helper);
 	drm_fb_helper_release_fbi(&ifbdev->helper);
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index b447cfd58361..d1f3d0582d00 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -221,12 +221,12 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
  * Note that GuC needs the CSS header plus uKernel code to be copied by the
  * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
  */
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
+static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
+			      struct i915_vma *vma)
 {
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
 	unsigned long offset;
-	struct sg_table *sg = fw_obj->pages;
+	struct sg_table *sg = vma->obj->pages;
 	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
 	int i, ret = 0;
 
@@ -243,7 +243,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
 	/* Set the source address for the new blob */
-	offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
+	offset = vma->node.start + guc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
@@ -287,6 +287,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	struct drm_device *dev = dev_priv->dev;
+	struct i915_vma *vma;
 	int ret;
 
 	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -295,10 +296,10 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 		return ret;
 	}
 
-	ret = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
-	if (ret) {
-		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
-		return ret;
+	vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma)) {
+		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+		return PTR_ERR(vma);
 	}
 
 	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
@@ -339,7 +340,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 
 	set_guc_init_params(dev_priv);
 
-	ret = guc_ucode_xfer_dma(dev_priv);
+	ret = guc_ucode_xfer_dma(dev_priv, vma);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -347,7 +348,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 	 * We keep the object pages for reuse during resume. But we can unpin it
 	 * now that DMA has completed, so it doesn't continue to take up space.
 	 */
-	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
+	i915_vma_unpin(vma);
 
 	return ret;
 }
@@ -560,9 +561,7 @@ fail:
 	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
 		  guc_fw->guc_fw_path, err);
 
-	obj = guc_fw->guc_fw_obj;
-	if (obj)
-		drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference_unlocked(&guc_fw->guc_fw_obj->base);
 	guc_fw->guc_fw_obj = NULL;
 
 	release_firmware(fw);		/* OK even if fw is NULL */
@@ -633,11 +632,8 @@ void intel_guc_ucode_fini(struct drm_device *dev)
 	direct_interrupts_to_host(dev_priv);
 	i915_guc_submission_fini(dev);
 
-	mutex_lock(&dev->struct_mutex);
-	if (guc_fw->guc_fw_obj)
-		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
+	drm_gem_object_unreference_unlocked(&guc_fw->guc_fw_obj->base);
 	guc_fw->guc_fw_obj = NULL;
-	mutex_unlock(&dev->struct_mutex);
 
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 206311b55e71..68d06ab6acdc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -232,7 +232,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ctx,
 static int intel_lr_context_pin(struct intel_context *ctx,
 				struct intel_engine_cs *engine);
 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *default_ctx_obj);
+					   struct i915_vma *vma);
 
 
 /**
@@ -570,41 +570,41 @@ static int intel_lr_context_pin(struct intel_context *ctx,
 				struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct drm_i915_gem_object *ctx_obj;
+	struct i915_vma *vma;
 	struct intel_ring *ring;
 	u32 ggtt_offset;
-	int ret = 0;
+	int ret;
 
 	if (ctx->engine[engine->id].pin_count++)
 		return 0;
 
 	lockdep_assert_held(&engine->dev->struct_mutex);
 
-	ctx_obj = ctx->engine[engine->id].state;
-	ret = i915_gem_object_ggtt_pin(ctx_obj, NULL,
+	vma = i915_gem_object_ggtt_pin(ctx->engine[engine->id].state, NULL,
 				       0, GEN8_LR_CONTEXT_ALIGN,
 				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP |
 				       PIN_HIGH);
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err;
+	}
 
 	ring = ctx->engine[engine->id].ring;
 	ret = intel_ring_map(ring);
 	if (ret)
-		goto unpin_ctx_obj;
+		goto unpin;
 
 	i915_gem_context_reference(ctx);
-	ctx_obj->dirty = true;
+	ctx->engine[engine->id].vma = vma;
+	vma->obj->dirty = true;
 
-	ggtt_offset =
-		i915_gem_obj_ggtt_offset(ctx_obj) + LRC_PPHWSP_PN * PAGE_SIZE;
+	ggtt_offset = vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
 	ring->context_descriptor =
 		ggtt_offset | engine->execlist_context_descriptor;
 
 	ring->registers =
-		kmap(i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN));
-	ring->registers[CTX_RING_BUFFER_START+1] =
-		i915_gem_obj_ggtt_offset(ring->obj);
+		kmap(i915_gem_object_get_dirty_page(vma->obj, LRC_STATE_PN));
+	ring->registers[CTX_RING_BUFFER_START+1] = ring->vma->node.start;
 
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission)
@@ -612,8 +612,8 @@ static int intel_lr_context_pin(struct intel_context *ctx,
 
 	return 0;
 
-unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ctx_obj);
+unpin:
+	__i915_vma_unpin(vma);
 err:
 	ctx->engine[engine->id].pin_count = 0;
 	return ret;
@@ -622,7 +622,7 @@ err:
 void intel_lr_context_unpin(struct intel_context *ctx,
 			    struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *ctx_obj;
+	struct i915_vma *vma;
 
 	lockdep_assert_held(&engine->dev->struct_mutex);
 	if (--ctx->engine[engine->id].pin_count)
@@ -630,9 +630,9 @@ void intel_lr_context_unpin(struct intel_context *ctx,
 
 	intel_ring_unmap(ctx->engine[engine->id].ring);
 
-	ctx_obj = ctx->engine[engine->id].state;
-	kunmap(i915_gem_object_get_page(ctx_obj, LRC_STATE_PN));
-	i915_gem_object_ggtt_unpin(ctx_obj);
+	vma = ctx->engine[engine->id].vma;
+	kunmap(i915_gem_object_get_page(vma->obj, LRC_STATE_PN));
+	i915_vma_unpin(vma);
 
 	i915_gem_context_unreference(ctx);
 }
@@ -925,43 +925,41 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
 	return wa_ctx_end(wa_ctx, *offset = index, 1);
 }
 
-static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+static struct i915_vma *
+lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
 {
-	int ret;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
-	if (!ring->wa_ctx.obj) {
-		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
-		return -ENOMEM;
-	}
+	obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
 
-	ret = i915_gem_object_ggtt_pin(ring->wa_ctx.obj, NULL, 0, PAGE_SIZE, 0);
-	if (ret) {
-		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
-				 ret);
-		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
-		return ret;
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, 0);
+	if (IS_ERR(vma)) {
+		drm_gem_object_unreference(&obj->base);
+		return vma;
 	}
 
-	return 0;
+	return vma;
 }
 
 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
 {
-	if (ring->wa_ctx.obj) {
-		i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
-		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
-		ring->wa_ctx.obj = NULL;
+	if (ring->wa_ctx.vma) {
+		i915_vma_unpin(ring->wa_ctx.vma);
+		drm_gem_object_unreference(&ring->wa_ctx.vma->obj->base);
+		ring->wa_ctx.vma = NULL;
 	}
 }
 
 static int intel_init_workaround_bb(struct intel_engine_cs *ring)
 {
-	int ret;
+	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
 	uint32_t *batch;
 	uint32_t offset;
 	struct page *page;
-	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+	int ret;
 
 	WARN_ON(ring->id != RCS);
 
@@ -978,15 +976,17 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
 		return -EINVAL;
 	}
 
-	ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
-	if (ret) {
+	wa_ctx->vma = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+	if (IS_ERR(wa_ctx->vma)) {
+		ret = PTR_ERR(wa_ctx->vma);
 		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
 		return ret;
 	}
 
-	page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
+	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
 	batch = kmap_atomic(page);
 	offset = 0;
+	ret = 0;
 
 	if (INTEL_INFO(ring->dev)->gen == 8) {
 		ret = gen8_init_indirectctx_bb(ring,
@@ -1060,7 +1060,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	lrc_setup_hardware_status_page(ring,
-			dev_priv->kernel_context->engine[ring->id].state);
+			dev_priv->kernel_context->engine[ring->id].vma);
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1422,9 +1422,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 
 	intel_engine_fini_breadcrumbs(ring);
 
-	if (ring->status_page.obj) {
-		kunmap(sg_page(ring->status_page.obj->pages->sgl));
-		ring->status_page.obj = NULL;
+	if (ring->status_page.vma) {
+		kunmap(sg_page(ring->status_page.vma->obj->pages->sgl));
+		ring->status_page.vma = NULL;
 	}
 	intel_lr_context_unpin(ring->i915->kernel_context, ring);
 
@@ -1799,9 +1799,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
 		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
 		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
-		if (ring->wa_ctx.obj) {
+		if (ring->wa_ctx.vma) {
 			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
-			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+			uint32_t ggtt_offset = wa_ctx->vma->node.start;
 
 			reg_state[CTX_RCS_INDIRECT_CTX+1] =
 				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
@@ -1920,17 +1920,17 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
 }
 
 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *default_ctx_obj)
+					   struct i915_vma *vma)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct page *page;
 
 	/* The HWSP is part of the default context object in LRC mode. */
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
-			+ LRC_PPHWSP_PN * PAGE_SIZE;
-	page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
+	ring->status_page.gfx_addr =
+		vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+	page = i915_gem_object_get_page(vma->obj, LRC_PPHWSP_PN);
 	ring->status_page.page_addr = kmap(page);
-	ring->status_page.obj = default_ctx_obj;
+	ring->status_page.vma = vma;
 
 	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
 			(u32)ring->status_page.gfx_addr);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 414a321b752f..d1401f4c4762 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -170,8 +170,8 @@ struct overlay_registers {
 struct intel_overlay {
 	struct drm_device *dev;
 	struct intel_crtc *crtc;
-	struct drm_i915_gem_object *vid_bo;
-	struct drm_i915_gem_object *old_vid_bo;
+	struct drm_i915_gem_object *vid_bo, *old_vid_bo;
+	struct i915_vma *vid_vma, *old_vid_vma;
 	bool active;
 	bool pfit_active;
 	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -197,7 +197,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
-					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
+					 overlay->flip_addr);
 
 	return regs;
 }
@@ -308,7 +308,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
-	i915_gem_object_ggtt_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(overlay->old_vid_vma);
 	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
@@ -316,14 +316,13 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 
 static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct drm_i915_gem_object *obj = overlay->vid_bo;
-
 	/* never have the overlay hw on without showing a frame */
-	if (WARN_ON(!obj))
+	if (WARN_ON(!overlay->vid_vma))
 		return;
 
-	i915_gem_object_ggtt_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_unpin_from_display_plane(overlay->vid_vma);
+	drm_gem_object_unreference(&overlay->vid_bo->base);
+	overlay->vid_vma = NULL;
 	overlay->vid_bo = NULL;
 
 	overlay->crtc->overlay = NULL;
@@ -741,6 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	struct drm_device *dev = overlay->dev;
 	u32 swidth, swidthsw, sheight, ostride;
 	enum pipe pipe = overlay->crtc->pipe;
+	struct i915_vma *vma;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -749,10 +749,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
+	vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
 						   &i915_ggtt_view_normal);
-	if (ret != 0)
-		return ret;
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
 	ret = i915_gem_object_put_fence(new_bo);
 	if (ret)
@@ -795,7 +795,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	swidth = params->src_w;
 	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
 	sheight = params->src_h;
-	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+	iowrite32(vma->node.start + params->offset_Y, &regs->OBUF_0Y);
 	ostride = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -809,8 +809,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      params->src_w/uv_hscale);
 		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
 		sheight |= (params->src_h/uv_vscale) << 16;
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+		iowrite32(vma->node.start + params->offset_U, &regs->OBUF_0U);
+		iowrite32(vma->node.start + params->offset_V, &regs->OBUF_0V);
 		ostride |= params->stride_UV << 16;
 	}
 
@@ -835,7 +835,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 			  INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	overlay->old_vid_bo = overlay->vid_bo;
+	overlay->old_vid_vma = overlay->vid_vma;
 	overlay->vid_bo = new_bo;
+	overlay->vid_vma = vma;
 
 	intel_frontbuffer_flip(dev,
 			       INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -843,7 +845,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	return 0;
 
 out_unpin:
-	i915_gem_object_ggtt_unpin(new_bo);
+	i915_gem_object_unpin_from_display_plane(vma);
 	return ret;
 }
 
@@ -1374,6 +1376,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	struct intel_overlay *overlay;
 	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers __iomem *regs;
+	struct i915_vma *vma = NULL;
 	int ret;
 
 	if (!HAS_OVERLAY(dev))
@@ -1406,13 +1409,14 @@ void intel_setup_overlay(struct drm_device *dev)
 		}
 		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
-		ret = i915_gem_object_ggtt_pin(reg_bo, NULL,
+		vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
 					       0, PAGE_SIZE, PIN_MAPPABLE);
-		if (ret) {
+		if (IS_ERR(vma)) {
 			DRM_ERROR("failed to pin overlay register bo\n");
+			ret = PTR_ERR(vma);
 			goto out_free_bo;
 		}
-		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+		overlay->flip_addr = vma->node.start;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1444,8 +1448,8 @@ void intel_setup_overlay(struct drm_device *dev)
 	return;
 
 out_unpin_bo:
-	if (!OVERLAY_NEEDS_PHYSICAL(dev))
-		i915_gem_object_ggtt_unpin(reg_bo);
+	if (vma)
+		i915_vma_unpin(vma);
 out_free_bo:
 	drm_gem_object_unreference(&reg_bo->base);
 out_free:
@@ -1490,7 +1494,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-						i915_gem_obj_ggtt_offset(overlay->reg_bo));
+						overlay->flip_addr);
 
 	return regs;
 }
@@ -1523,7 +1527,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
 	else
-		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+		error->base = overlay->flip_addr;
 
 	regs = intel_overlay_map_regs_atomic(overlay);
 	if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6db7f93a3c1d..dbc76cd54c3e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -534,7 +534,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring *ringbuf = ring->buffer;
-	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct i915_vma *vma = ringbuf->vma;
 	int ret = 0;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -574,7 +574,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+	I915_WRITE_START(ring, vma->node.start);
 
 	/* WaClearRingBufHeadRegAtInit:ctg,elk */
 	if (I915_READ_HEAD(ring))
@@ -589,14 +589,14 @@ static int init_ring_common(struct intel_engine_cs *ring)
 
 	/* If the head is still not zero, the ring is dead */
 	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
+		     I915_READ_START(ring) == vma->node.start &&
 		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
 			  ring->name,
 			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
 			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
-			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+			  I915_READ_START(ring), (unsigned long)vma->node.start);
 		ret = -EIO;
 		goto out;
 	}
@@ -622,10 +622,11 @@ intel_fini_pipe_control(struct intel_engine_cs *ring)
 	if (ring->scratch.obj == NULL)
 		return;
 
-	if (INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_INFO(dev)->gen >= 5)
 		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_ggtt_unpin(ring->scratch.obj);
-	}
+
+	if (ring->scratch.vma)
+		i915_vma_unpin(ring->scratch.vma);
 
 	drm_gem_object_unreference(&ring->scratch.obj->base);
 	ring->scratch.obj = NULL;
@@ -634,6 +635,7 @@ intel_fini_pipe_control(struct intel_engine_cs *ring)
 int
 intel_init_pipe_control(struct intel_engine_cs *ring)
 {
+	struct i915_vma *vma;
 	int ret;
 
 	WARN_ON(ring->scratch.obj);
@@ -649,12 +651,14 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unref;
 
-	ret = i915_gem_object_ggtt_pin(ring->scratch.obj, NULL,
+	vma = i915_gem_object_ggtt_pin(ring->scratch.obj, NULL,
 				       0, 4096, PIN_HIGH);
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err_unref;
+	}
 
-	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.gtt_offset = vma->node.start;
 	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
 	if (ring->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
@@ -663,10 +667,11 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
 			 ring->name, ring->scratch.gtt_offset);
+	ring->scratch.vma = vma;
 	return 0;
 
 err_unpin:
-	i915_gem_object_ggtt_unpin(ring->scratch.obj);
+	i915_vma_unpin(vma);
 err_unref:
 	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
@@ -1167,10 +1172,13 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->semaphore_obj) {
-		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
-		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
-		dev_priv->semaphore_obj = NULL;
+	if (dev_priv->semaphore_vma) {
+		struct drm_i915_gem_object *obj = dev_priv->semaphore_vma->obj;
+
+		i915_vma_unpin(dev_priv->semaphore_vma);
+		dev_priv->semaphore_vma = NULL;
+
+		drm_gem_object_unreference(&obj->base);
 	}
 
 	intel_fini_pipe_control(ring);
@@ -1806,67 +1814,70 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 
 static void cleanup_status_page(struct intel_engine_cs *ring)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	obj = ring->status_page.obj;
-	if (obj == NULL)
+	vma = ring->status_page.vma;
+	if (vma == NULL)
 		return;
+	ring->status_page.vma = NULL;
 
-	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_ggtt_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-	ring->status_page.obj = NULL;
+	kunmap(sg_page(vma->obj->pages->sgl));
+	i915_vma_unpin(vma);
+
+	drm_gem_object_unreference(&vma->obj->base);
 }
 
 static int init_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	unsigned flags;
+	int ret;
 
-	if ((obj = ring->status_page.obj) == NULL) {
-		unsigned flags;
-		int ret;
+	if (ring->status_page.vma)
+		return 0;
 
-		obj = i915_gem_alloc_object(ring->dev, 4096);
-		if (obj == NULL) {
-			DRM_ERROR("Failed to allocate status page\n");
-			return -ENOMEM;
-		}
+	obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate status page\n");
+		return -ENOMEM;
+	}
 
-		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-		if (ret)
-			goto err_unref;
-
-		flags = 0;
-		if (!HAS_LLC(ring->dev))
-			/* On g33, we cannot place HWS above 256MiB, so
-			 * restrict its pinning to the low mappable arena.
-			 * Though this restriction is not documented for
-			 * gen4, gen5, or byt, they also behave similarly
-			 * and hang if the HWS is placed at the top of the
-			 * GTT. To generalise, it appears that all !llc
-			 * platforms have issues with us placing the HWS
-			 * above the mappable region (even though we never
-			 * actualy map it).
-			 */
-			flags |= PIN_MAPPABLE;
-		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, flags);
-		if (ret) {
-err_unref:
-			drm_gem_object_unreference(&obj->base);
-			return ret;
-		}
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	if (ret)
+		goto err_unref;
 
-		ring->status_page.obj = obj;
+	flags = 0;
+	if (!HAS_LLC(ring->dev))
+		/* On g33, we cannot place HWS above 256MiB, so
+		 * restrict its pinning to the low mappable arena.
+		 * Though this restriction is not documented for
+		 * gen4, gen5, or byt, they also behave similarly
+		 * and hang if the HWS is placed at the top of the
+		 * GTT. To generalise, it appears that all !llc
+		 * platforms have issues with us placing the HWS
+		 * above the mappable region (even though we never
+			 * actually map it).
+		 */
+		flags |= PIN_MAPPABLE;
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, flags);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+	ring->status_page.vma = vma;
+	ring->status_page.gfx_addr = vma->node.start;
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
 	return 0;
+
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+	return ret;
 }
 
 static int init_phys_status_page(struct intel_engine_cs *ring)
@@ -1889,14 +1900,15 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 int intel_ring_map(struct intel_ring *ring)
 {
 	struct drm_i915_gem_object *obj = ring->obj;
+	struct i915_vma *vma;
 	int ret;
 
 	if (HAS_LLC(ring->engine->i915) && !obj->stolen) {
-		ret = i915_gem_object_ggtt_pin(obj, NULL,
+		vma = i915_gem_object_ggtt_pin(obj, NULL,
 					       0, PAGE_SIZE,
 					       PIN_HIGH);
-		if (ret)
-			return ret;
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
 		if (ret)
@@ -1909,18 +1921,18 @@ int intel_ring_map(struct intel_ring *ring)
 			goto unpin;
 		}
 	} else {
-		ret = i915_gem_object_ggtt_pin(obj, NULL,
+		vma = i915_gem_object_ggtt_pin(obj, NULL,
 					       0, PAGE_SIZE,
 					       PIN_MAPPABLE);
-		if (ret)
-			return ret;
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
 		ret = i915_gem_object_set_to_gtt_domain(obj, true);
 		if (ret)
 			goto unpin;
 
 		ring->virtual_start = ioremap_wc(ring->engine->i915->gtt.mappable_base +
-						 i915_gem_obj_ggtt_offset(obj),
+						 vma->node.start,
 						 ring->size);
 		if (ring->virtual_start == NULL) {
 			ret = -ENOMEM;
@@ -1928,10 +1940,11 @@ int intel_ring_map(struct intel_ring *ring)
 		}
 	}
 
+	ring->vma = vma;
 	return 0;
 
 unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	i915_vma_unpin(vma);
 	return ret;
 }
 
@@ -1941,7 +1954,9 @@ void intel_ring_unmap(struct intel_ring *ring)
 		i915_gem_object_unpin_vmap(ring->obj);
 	else
 		iounmap(ring->virtual_start);
-	i915_gem_object_ggtt_unpin(ring->obj);
+
+	i915_vma_unpin(ring->vma);
+	ring->vma = NULL;
 }
 
 static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
@@ -2507,16 +2522,20 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
 				i915.semaphores = 0;
 			} else {
+				struct i915_vma *vma;
+
 				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-				ret = i915_gem_object_ggtt_pin(obj, NULL,
+				vma = i915_gem_object_ggtt_pin(obj, NULL,
 							       0, 0,
 							       PIN_HIGH);
-				if (ret != 0) {
+				if (IS_ERR(vma)) {
 					drm_gem_object_unreference(&obj->base);
 					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
 					i915.semaphores = 0;
-				} else
-					dev_priv->semaphore_obj = obj;
+					vma = NULL;
+				}
+
+				dev_priv->semaphore_vma = vma;
 			}
 		}
 
@@ -2527,8 +2546,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_disable = gen8_ring_disable_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->irq_seqno_barrier = gen6_seqno_barrier;
-		if (i915.semaphores) {
-			WARN_ON(!dev_priv->semaphore_obj);
+		if (dev_priv->semaphore_vma) {
 			ring->semaphore.sync_to = gen8_ring_sync;
 			ring->semaphore.signal = gen8_rcs_signal;
 			GEN8_RING_SEMAPHORE_INIT;
@@ -2604,21 +2622,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
+		struct i915_vma *vma;
+
 		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
 			return -ENOMEM;
 		}
 
-		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
-		if (ret != 0) {
+		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+		if (IS_ERR(vma)) {
 			drm_gem_object_unreference(&obj->base);
-			DRM_ERROR("Failed to ping batch bo\n");
-			return ret;
+			DRM_ERROR("Failed to pin batch bo\n");
+			return PTR_ERR(vma);
 		}
 
 		ring->scratch.obj = obj;
-		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+		ring->scratch.vma = vma;
+		ring->scratch.gtt_offset = vma->node.start;
 	}
 
 	ret = intel_init_engine(dev, ring);
@@ -2656,7 +2677,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->irq_enable = gen8_ring_enable_irq;
 			ring->irq_disable = gen8_ring_disable_irq;
 			ring->emit_bb_start = gen8_emit_bb_start;
-			if (i915.semaphores) {
+			if (dev_priv->semaphore_vma) {
 				ring->semaphore.sync_to = gen8_ring_sync;
 				ring->semaphore.signal = gen8_xcs_signal;
 				GEN8_RING_SEMAPHORE_INIT;
@@ -2721,7 +2742,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	ring->irq_enable = gen8_ring_enable_irq;
 	ring->irq_disable = gen8_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
-	if (i915.semaphores) {
+	if (dev_priv->semaphore_vma) {
 		ring->semaphore.sync_to = gen8_ring_sync;
 		ring->semaphore.signal = gen8_xcs_signal;
 		GEN8_RING_SEMAPHORE_INIT;
@@ -2749,7 +2770,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		ring->irq_enable = gen8_ring_enable_irq;
 		ring->irq_disable = gen8_ring_disable_irq;
 		ring->emit_bb_start = gen8_emit_bb_start;
-		if (i915.semaphores) {
+		if (dev_priv->semaphore_vma) {
 			ring->semaphore.sync_to = gen8_ring_sync;
 			ring->semaphore.signal = gen8_xcs_signal;
 			GEN8_RING_SEMAPHORE_INIT;
@@ -2805,7 +2826,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		ring->irq_enable = gen8_ring_enable_irq;
 		ring->irq_disable = gen8_ring_disable_irq;
 		ring->emit_bb_start = gen8_emit_bb_start;
-		if (i915.semaphores) {
+		if (dev_priv->semaphore_vma) {
 			ring->semaphore.sync_to = gen8_ring_sync;
 			ring->semaphore.signal = gen8_xcs_signal;
 			GEN8_RING_SEMAPHORE_INIT;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 868cc8d5abb3..894eb8089296 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -25,10 +25,10 @@
  */
 #define I915_RING_FREE_SPACE 64
 
-struct  intel_hw_status_page {
+struct intel_hw_status_page {
 	u32		*page_addr;
 	unsigned int	gfx_addr;
-	struct		drm_i915_gem_object *obj;
+	struct		i915_vma *vma;
 };
 
 #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
@@ -54,19 +54,16 @@ struct  intel_hw_status_page {
  */
 #define i915_semaphore_seqno_size sizeof(uint64_t)
 #define GEN8_SIGNAL_OFFSET(__ring, to)			     \
-	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	(dev_priv->semaphore_vma->node.start + \
 	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) +	\
 	(i915_semaphore_seqno_size * (to)))
 
 #define GEN8_WAIT_OFFSET(__ring, from)			     \
-	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	(dev_priv->semaphore_vma->node.start + \
 	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
 	(i915_semaphore_seqno_size * (__ring)->id))
 
 #define GEN8_RING_SEMAPHORE_INIT do { \
-	if (!dev_priv->semaphore_obj) { \
-		break; \
-	} \
 	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
 	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
 	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
@@ -99,6 +96,7 @@ struct intel_engine_hangcheck {
 
 struct intel_ring {
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	void *virtual_start;
 
 	struct intel_engine_cs *engine;
@@ -146,7 +144,7 @@ struct  i915_ctx_workarounds {
 		u32 offset;
 		u32 size;
 	} indirect_ctx, per_ctx;
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 };
 
 struct drm_i915_gem_request;
@@ -322,6 +320,7 @@ struct intel_engine_cs {
 
 	struct {
 		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
 		u32 gtt_offset;
 		volatile u32 *cpu_page;
 	} scratch;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4d448b990c50..768989f578cb 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -461,8 +461,8 @@ vlv_update_plane(struct drm_plane *dplane,
 
 	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPCNTR(pipe, plane), sprctl);
-	I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
-		   sprsurf_offset);
+	I915_WRITE(SPSURF(pipe, plane),
+		   i915_gem_object_ggtt_offset(obj, NULL) + sprsurf_offset);
 	POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -603,7 +603,7 @@ ivb_update_plane(struct drm_plane *plane,
 		I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
 	I915_WRITE(SPRSURF(pipe),
-		   i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+		   i915_gem_object_ggtt_offset(obj, NULL) + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
 }
 
@@ -733,7 +733,7 @@ ilk_update_plane(struct drm_plane *plane,
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
 	I915_WRITE(DVSSURF(pipe),
-		   i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+		   i915_gem_object_ggtt_offset(obj, NULL) + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
 }
 
-- 
2.7.0.rc3
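
For reference, a minimal sketch (not part of the patch) of the calling
convention the series moves callers to: pinning returns the VMA, or an
ERR_PTR on failure, and that VMA is the cookie the caller holds until it
unpins. The helper names match those used in the diff above; the
surrounding function is purely illustrative.

/* Illustrative only: hold the pinned VMA as the cookie and release it with
 * i915_vma_unpin(), rather than re-looking up the binding from the object.
 */
static int example_use_ggtt_binding(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	u64 offset;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	offset = vma->node.start;	/* GGTT address of this binding */
	/* ... program the hardware with offset ... */

	i915_vma_unpin(vma);		/* drop the pin we were returned */
	return 0;
}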


