[PATCH 6/8] drm/vmwgfx: Refactor resource management

Thomas Hellstrom thellstrom at vmware.com
Fri Nov 9 04:26:16 PST 2012


Refactor resource management to make it easy to hook up resources
that are backed by buffers. In particular, resources and their
backing buffers can now be evicted and rebound, if supported by the device.
To avoid query deadlocks, the query pinning and barrier handling in the
execbuf code is also reworked.

Signed-off-by: Thomas Hellstrom <thellstrom at vmware.com>
Reviewed-by: Brian Paul <brianp at vmware.com>
Reviewed-by: Dmitry Torokhov <dtor at vmware.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c   |    7 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |   32 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  145 ++-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  900 +++++++++++++-----
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c    |    7 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1528 ++++++++++++++++++++----------
 6 files changed, 1802 insertions(+), 817 deletions(-)
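
As a review aid, and not part of the patch itself: the core of the refactor
is the per-type function table, struct vmw_res_func, added to
vmwgfx_resource.c further down. A resource type plugs in roughly as in the
sketch below; the callback, placement and type names used here are
hypothetical placeholders, only the struct layout follows this patch.

	/* Illustrative only -- a hypothetical resource type hooking into
	 * the vmw_res_func interface introduced by this patch. */
	static int example_res_create(struct vmw_resource *res);
	static int example_res_destroy(struct vmw_resource *res);
	static int example_res_bind(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf);
	static int example_res_unbind(struct vmw_resource *res, bool readback,
				      struct ttm_validate_buffer *val_buf);

	static const struct vmw_res_func example_res_func = {
		.res_type = vmw_res_surface,	/* lru list used for eviction */
		.needs_backup = true,		/* guest-backed; evicts to its
						 * backup buffer */
		.may_evict = true,
		.type_name = "example resource",
		.backup_placement = &example_backup_placement,
		.create = example_res_create,	/* create hardware resource */
		.destroy = example_res_destroy,
		.bind = example_res_bind,	/* rebind after revalidation */
		.unbind = example_res_unbind,	/* read back to the backup
						 * buffer on eviction */
	};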

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index f386cea..e88b0eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,7 +60,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 		return ret;
 
 	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+		vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 		return ret;
 
 	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+		vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
 		goto err_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f233473..46b121c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	struct vmw_private *dev_priv;
 	int ret;
 	uint32_t svga_id;
+	enum vmw_res_type i;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	rwlock_init(&dev_priv->resource_lock);
-	idr_init(&dev_priv->context_idr);
-	idr_init(&dev_priv->surface_idr);
-	idr_init(&dev_priv->stream_idr);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i) {
+		idr_init(&dev_priv->res_idr[i]);
+		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+	}
+
 	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	dev_priv->fence_queue_waiters = 0;
 	atomic_set(&dev_priv->fifo_queue_waiters, 0);
-	INIT_LIST_HEAD(&dev_priv->surface_lru);
+
 	dev_priv->used_memory_size = 0;
 
 	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -670,9 +674,9 @@ out_err2:
 out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
-	idr_destroy(&dev_priv->surface_idr);
-	idr_destroy(&dev_priv->context_idr);
-	idr_destroy(&dev_priv->stream_idr);
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
+
 	kfree(dev_priv);
 	return ret;
 }
@@ -680,9 +684,12 @@ out_err0:
 static int vmw_driver_unload(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	enum vmw_res_type i;
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
+	if (dev_priv->ctx.res_ht_initialized)
+		drm_ht_remove(&dev_priv->ctx.res_ht);
 	if (dev_priv->ctx.cmd_bounce)
 		vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->enable_fb) {
@@ -709,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 	vmw_ttm_global_release(dev_priv);
-	idr_destroy(&dev_priv->surface_idr);
-	idr_destroy(&dev_priv->context_idr);
-	idr_destroy(&dev_priv->stream_idr);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
 
 	kfree(dev_priv);
 
@@ -935,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -987,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 		 * This empties VRAM and unbinds all GMR bindings.
 		 * Buffer contents is moved to swappable memory.
 		 */
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+		vmw_execbuf_release_pinned_bo(dev_priv);
+		vmw_resource_evict_all(dev_priv);
 		ttm_bo_swapout_all(&dev_priv->bdev);
 
 		break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7c6f6e3..7aefb9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
 
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
-	struct list_head validate_list;
-	bool gmr_bound;
-	uint32_t cur_validate_node;
-	bool on_validate_list;
+	struct list_head res_list;
 };
 
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure also carries driver-private validation info on top of
+ * the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
 struct vmw_resource {
 	struct kref kref;
 	struct vmw_private *dev_priv;
-	struct idr *idr;
 	int id;
-	enum ttm_object_type res_type;
 	bool avail;
-	void (*remove_from_lists) (struct vmw_resource *res);
-	void (*hw_destroy) (struct vmw_resource *res);
+	unsigned long backup_size;
+	bool res_dirty; /* Protected by backup buffer reserved */
+	bool backup_dirty; /* Protected by backup buffer reserved */
+	struct vmw_dma_buffer *backup;
+	unsigned long backup_offset;
+	const struct vmw_res_func *func;
+	struct list_head lru_head; /* Protected by the resource lock */
+	struct list_head mob_head; /* Protected by @backup reserved */
 	void (*res_free) (struct vmw_resource *res);
-	struct list_head validate_head;
-	struct list_head query_head; /* Protected by the cmdbuf mutex */
-	/* TODO is a generic snooper needed? */
-#if 0
-	void (*snoop)(struct vmw_resource *res,
-		      struct ttm_object_file *tfile,
-		      SVGA3dCmdHeader *header);
-	void *snoop_priv;
-#endif
+	void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+	vmw_res_context,
+	vmw_res_surface,
+	vmw_res_stream,
+	vmw_res_max
 };
 
 struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
 
 struct vmw_surface {
 	struct vmw_resource res;
-	struct list_head lru_head; /* Protected by the resource lock */
 	uint32_t flags;
 	uint32_t format;
 	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+	struct drm_vmw_size base_size;
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
-
 	bool scanout;
-
 	/* TODO so far just a extra pointer */
 	struct vmw_cursor_snooper snooper;
-	struct ttm_buffer_object *backup;
 	struct vmw_surface_offset *offsets;
-	uint32_t backup_size;
+	SVGA3dTextureFilter autogen_filter;
+	uint32_t multisample_count;
 };
 
 struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
 	uint32_t index;
 };
 
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
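+ * @node: Validation node set up for @res; cached so that repeated lookups
+ * of the same handle can reuse it.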
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+	bool valid;
+	uint32_t handle;
+	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+};
+
 struct vmw_sw_context{
-	struct ida bo_list;
-	uint32_t last_cid;
-	bool cid_valid;
+	struct drm_open_hash res_ht;
+	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct vmw_resource *cur_ctx;
-	uint32_t last_sid;
-	uint32_t sid_translation;
-	bool sid_valid;
 	struct ttm_object_file *tfile;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
-	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
 	uint32_t cur_val_buf;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	uint32_t fence_flags;
-	struct list_head query_list;
 	struct ttm_buffer_object *cur_query_bo;
-	uint32_t cur_query_cid;
-	bool query_cid_valid;
+	struct list_head res_relocations;
+	uint32_t *buf_start;
+	struct vmw_res_cache_entry res_cache[vmw_res_max];
+	struct vmw_resource *last_query_ctx;
+	bool needs_post_query_barrier;
+	struct vmw_resource *error_resource;
 };
 
 struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
 	 */
 
 	rwlock_t resource_lock;
-	struct idr context_idr;
-	struct idr surface_idr;
-	struct idr stream_idr;
-
+	struct idr res_idr[vmw_res_max];
 	/*
 	 * Block lastclose from racing with firstopen.
 	 */
@@ -320,6 +347,7 @@ struct vmw_private {
 	struct ttm_buffer_object *dummy_query_bo;
 	struct ttm_buffer_object *pinned_bo;
 	uint32_t query_cid;
+	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
 
 	/*
@@ -329,10 +357,15 @@ struct vmw_private {
 	 * protected by the cmdbuf mutex for simplicity.
 	 */
 
-	struct list_head surface_lru;
+	struct list_head res_lru[vmw_res_max];
 	uint32_t used_memory_size;
 };
 
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_surface, res);
+}
+
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
 	return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 /**
  * Resource utilities - vmwgfx_resource.c
  */
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
 
 extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
 				  struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+	struct vmw_private *dev_priv,
+	struct ttm_object_file *tfile,
+	uint32_t handle,
+	const struct vmw_user_resource_conv *converter,
+	struct vmw_resource **p_res);
 extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
-			    struct vmw_surface *srf,
-			    void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-					  struct ttm_object_file *tfile,
-					  uint32_t handle,
-					  struct vmw_surface **out);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +478,15 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   struct vmw_dma_buffer *new_backup,
+				   unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+				     struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+				struct vmw_fence_obj *fence,
+				void *sync_obj_arg);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +584,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-			      bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+					    struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
 
 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 				      struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4..1e43d5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
 
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset of 4 byte entries into the command buffer where the
+ * id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+	struct list_head head;
+	const struct vmw_resource *res;
+	unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: The resource does not need to allocate a backup buffer
+ * on reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+	struct list_head head;
+	struct drm_hash_item hash;
+	struct vmw_resource *res;
+	struct vmw_dma_buffer *new_backup;
+	unsigned long new_backup_offset;
+	bool first_usage;
+	bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: list of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+					bool backoff)
+{
+	struct vmw_resource_val_node *val;
+
+	list_for_each_entry(val, list, head) {
+		struct vmw_resource *res = val->res;
+		struct vmw_dma_buffer *new_backup =
+			backoff ? NULL : val->new_backup;
+
+		vmw_resource_unreserve(res, new_backup,
+			val->new_backup_offset);
+		vmw_dmabuf_unreference(&val->new_backup);
+	}
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *res,
+				struct vmw_resource_val_node **p_node)
+{
+	struct vmw_resource_val_node *node;
+	struct drm_hash_item *hash;
+	int ret;
+
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+				    &hash) == 0)) {
+		node = container_of(hash, struct vmw_resource_val_node, hash);
+		node->first_usage = false;
+		if (unlikely(p_node != NULL))
+			*p_node = node;
+		return 0;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (unlikely(node == NULL)) {
+		DRM_ERROR("Failed to allocate a resource validation "
+			  "entry.\n");
+		return -ENOMEM;
+	}
+
+	node->hash.key = (unsigned long) res;
+	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to initialize a resource validation "
+			  "entry.\n");
+		kfree(node);
+		return ret;
+	}
+	list_add_tail(&node->head, &sw_context->resource_list);
+	node->res = vmw_resource_reference(res);
+	node->first_usage = true;
+
+	if (unlikely(p_node != NULL))
+		*p_node = node;
+
+	return 0;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+				       const struct vmw_resource *res,
+				       unsigned long offset)
+{
+	struct vmw_resource_relocation *rel;
+
+	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+	if (unlikely(rel == NULL)) {
+		DRM_ERROR("Failed to allocate a resource relocation.\n");
+		return -ENOMEM;
+	}
+
+	rel->res = res;
+	rel->offset = offset;
+	list_add_tail(&rel->head, list);
+
+	return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+	struct vmw_resource_relocation *rel, *n;
+
+	list_for_each_entry_safe(rel, n, list, head) {
+		list_del(&rel->head);
+		kfree(rel);
+	}
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+					   struct list_head *list)
+{
+	struct vmw_resource_relocation *rel;
+
+	list_for_each_entry(rel, list, head)
+		cb[rel->offset] = rel->res->id;
+}
+
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 			   struct vmw_sw_context *sw_context,
 			   SVGA3dCmdHeader *header)
@@ -44,23 +219,12 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 	return 0;
 }
 
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
-					  struct vmw_resource **p_res)
-{
-	struct vmw_resource *res = *p_res;
-
-	if (list_empty(&res->validate_head)) {
-		list_add_tail(&res->validate_head, &sw_context->resource_list);
-		*p_res = NULL;
-	} else
-		vmw_resource_unreference(p_res);
-}
-
 /**
  * vmw_bo_to_validate_list - add a bo to a validate list
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
+ * @already_reserved: Whether the buffer object is already reserved.
  * @fence_flags: Fence flags to be or'ed with any other fence flags for
  * this buffer on this submission batch.
  * @p_val_node: If non-NULL Will be updated with the validate node number
@@ -75,22 +239,38 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   uint32_t *p_val_node)
 {
 	uint32_t val_node;
+	struct vmw_validate_buffer *vval_buf;
 	struct ttm_validate_buffer *val_buf;
+	struct drm_hash_item *hash;
+	int ret;
 
-	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
-	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
-		DRM_ERROR("Max number of DMA buffers per submission"
-			  " exceeded.\n");
-		return -EINVAL;
-	}
-
-	val_buf = &sw_context->val_bufs[val_node];
-	if (unlikely(val_node == sw_context->cur_val_buf)) {
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+				    &hash) == 0)) {
+		vval_buf = container_of(hash, struct vmw_validate_buffer,
+					hash);
+		val_buf = &vval_buf->base;
+		val_node = vval_buf - sw_context->val_bufs;
+	} else {
+		val_node = sw_context->cur_val_buf;
+		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+			DRM_ERROR("Max number of DMA buffers per submission "
+				  "exceeded.\n");
+			return -EINVAL;
+		}
+		vval_buf = &sw_context->val_bufs[val_node];
+		vval_buf->hash.key = (unsigned long) bo;
+		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to initialize a buffer validation "
+				  "entry.\n");
+			return ret;
+		}
+		++sw_context->cur_val_buf;
+		val_buf = &vval_buf->base;
 		val_buf->new_sync_obj_arg = NULL;
 		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->reserved = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-		++sw_context->cur_val_buf;
 	}
 
 	val_buf->new_sync_obj_arg = (void *)
@@ -103,85 +283,176 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 	return 0;
 }
 
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
-			     struct vmw_sw_context *sw_context,
-			     SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since VMware's command submission is currently protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources:
+ * only a single thread at a time will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
-	struct vmw_resource *ctx;
-
-	struct vmw_cid_cmd {
-		SVGA3dCmdHeader header;
-		__le32 cid;
-	} *cmd;
+	struct vmw_resource_val_node *val;
 	int ret;
 
-	cmd = container_of(header, struct vmw_cid_cmd, header);
-	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
-		return 0;
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		struct vmw_resource *res = val->res;
 
-	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
-				&ctx);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could not find or use context %u\n",
-			  (unsigned) cmd->cid);
-		return ret;
+		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		if (unlikely(ret != 0))
+			return ret;
+
+		if (res->backup) {
+			struct ttm_buffer_object *bo = &res->backup->base;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, bo, (unsigned long)
+				 DRM_VMW_FENCE_FLAG_EXEC,
+				 NULL);
+
+			if (unlikely(ret != 0))
+				return ret;
+		}
 	}
+	return 0;
+}
 
-	sw_context->last_cid = cmd->cid;
-	sw_context->cid_valid = true;
-	sw_context->cur_ctx = ctx;
-	vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		struct vmw_resource *res = val->res;
 
+		ret = vmw_resource_validate(res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to validate resource.\n");
+			return ret;
+		}
+	}
 	return 0;
 }
 
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type-specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
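+ * @p_val: If non-NULL, points on successful return to the validation node
+ * set up for the resource.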
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
-			     uint32_t *sid)
+			     enum vmw_res_type res_type,
+			     const struct vmw_user_resource_conv *converter,
+			     uint32_t *id,
+			     struct vmw_resource_val_node **p_val)
 {
-	struct vmw_surface *srf;
-	int ret;
+	struct vmw_res_cache_entry *rcache =
+		&sw_context->res_cache[res_type];
 	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+	int ret;
 
-	if (*sid == SVGA3D_INVALID_ID)
+	if (*id == SVGA3D_INVALID_ID)
 		return 0;
 
-	if (likely((sw_context->sid_valid  &&
-		      *sid == sw_context->last_sid))) {
-		*sid = sw_context->sid_translation;
-		return 0;
-	}
+	/*
+	 * Fastpath in case of repeated commands referencing the same
+	 * resource
+	 */
 
-	ret = vmw_user_surface_lookup_handle(dev_priv,
-					     sw_context->tfile,
-					     *sid, &srf);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could ot find or use surface 0x%08x "
-			  "address 0x%08lx\n",
-			  (unsigned int) *sid,
-			  (unsigned long) sid);
-		return ret;
+	if (likely(rcache->valid && *id == rcache->handle)) {
+		const struct vmw_resource *res = rcache->res;
+
+		rcache->node->first_usage = false;
+		if (p_val)
+			*p_val = rcache->node;
+
+		return vmw_resource_relocation_add
+			(&sw_context->res_relocations, res,
+			 id - sw_context->buf_start);
 	}
 
-	ret = vmw_surface_validate(dev_priv, srf);
+	ret = vmw_user_resource_lookup_handle(dev_priv,
+					      sw_context->tfile,
+					      *id,
+					      converter,
+					      &res);
 	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Could not validate surface.\n");
-		vmw_surface_unreference(&srf);
+		DRM_ERROR("Could not find or use resource 0x%08x.\n",
+			  (unsigned) *id);
+		dump_stack();
 		return ret;
 	}
 
-	sw_context->last_sid = *sid;
-	sw_context->sid_valid = true;
-	sw_context->sid_translation = srf->res.id;
-	*sid = sw_context->sid_translation;
+	rcache->valid = true;
+	rcache->res = res;
+	rcache->handle = *id;
 
-	res = &srf->res;
-	vmw_resource_to_validate_list(sw_context, &res);
+	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+					  res,
+					  id - sw_context->buf_start);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	ret = vmw_resource_val_add(sw_context, res, &node);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
 
+	rcache->node = node;
+	if (p_val)
+		*p_val = node;
+	vmw_resource_unreference(&res);
 	return 0;
+
+out_no_reloc:
+	BUG_ON(sw_context->error_resource != NULL);
+	sw_context->error_resource = res;
+
+	return ret;
 }
 
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_cid_cmd {
+		SVGA3dCmdHeader header;
+		__le32 cid;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_cid_cmd, header);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->cid, NULL);
+}
 
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 					   struct vmw_sw_context *sw_context,
@@ -198,7 +469,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		return ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.target.sid, NULL);
 	return ret;
 }
 
@@ -213,10 +486,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src.sid, NULL);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +507,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src.sid, NULL);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +533,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 		return -EPERM;
 	}
 
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.srcImage.sid, NULL);
 }
 
 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +555,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 		return -EPERM;
 	}
 
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter, &cmd->body.sid,
+				 NULL);
 }
 
 /**
  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
  *
  * @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
  * @new_query_bo: The new buffer holding query results.
  * @sw_context: The software context used for this command submission.
  *
@@ -287,18 +571,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * query results, and if another buffer currently is pinned for query
  * results. If so, the function prepares the state of @sw_context for
  * switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       uint32_t cid,
 				       struct ttm_buffer_object *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
+	struct vmw_res_cache_entry *ctx_entry =
+		&sw_context->res_cache[vmw_res_context];
 	int ret;
-	bool add_cid = false;
-	uint32_t cid_to_add;
+
+	BUG_ON(!ctx_entry->valid);
+	sw_context->last_query_ctx = ctx_entry->res;
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
@@ -308,9 +592,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 		}
 
 		if (unlikely(sw_context->cur_query_bo != NULL)) {
-			BUG_ON(!sw_context->query_cid_valid);
-			add_cid = true;
-			cid_to_add = sw_context->cur_query_cid;
+			sw_context->needs_post_query_barrier = true;
 			ret = vmw_bo_to_validate_list(sw_context,
 						      sw_context->cur_query_bo,
 						      DRM_VMW_FENCE_FLAG_EXEC,
@@ -329,28 +611,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	}
 
-	if (unlikely(cid != sw_context->cur_query_cid &&
-		     sw_context->query_cid_valid)) {
-		add_cid = true;
-		cid_to_add = sw_context->cur_query_cid;
-	}
-
-	sw_context->cur_query_cid = cid;
-	sw_context->query_cid_valid = true;
-
-	if (add_cid) {
-		struct vmw_resource *ctx = sw_context->cur_ctx;
-
-		if (list_empty(&ctx->query_head))
-			list_add_tail(&ctx->query_head,
-				      &sw_context->query_list);
-		ret = vmw_bo_to_validate_list(sw_context,
-					      dev_priv->dummy_query_bo,
-					      DRM_VMW_FENCE_FLAG_EXEC,
-					      NULL);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 	return 0;
 }
 
@@ -362,10 +622,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
  * @sw_context: The software context used for this command submission batch.
  *
  * This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
  * issue a dummy occlusion query wait used as a query barrier. When the fence
  * object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
  * However, since both the new query buffer and the old one are fenced with
  * that fence, we can do an asynchronus unpin now, and be sure that the
  * old query buffer won't be moved until the fence has signaled.
@@ -376,20 +635,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context)
 {
-
-	struct vmw_resource *ctx, *next_ctx;
-	int ret;
-
 	/*
 	 * The validate list should still hold references to all
 	 * contexts here.
 	 */
 
-	list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
-				 query_head) {
-		list_del_init(&ctx->query_head);
+	if (sw_context->needs_post_query_barrier) {
+		struct vmw_res_cache_entry *ctx_entry =
+			&sw_context->res_cache[vmw_res_context];
+		struct vmw_resource *ctx;
+		int ret;
 
-		BUG_ON(list_empty(&ctx->validate_head));
+		BUG_ON(!ctx_entry->valid);
+		ctx = ctx_entry->res;
 
 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 
@@ -403,40 +661,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			ttm_bo_unref(&dev_priv->pinned_bo);
 		}
 
-		vmw_bo_pin(sw_context->cur_query_bo, true);
+		if (!sw_context->needs_post_query_barrier) {
+			vmw_bo_pin(sw_context->cur_query_bo, true);
 
-		/*
-		 * We pin also the dummy_query_bo buffer so that we
-		 * don't need to validate it when emitting
-		 * dummy queries in context destroy paths.
-		 */
+			/*
+			 * We pin also the dummy_query_bo buffer so that we
+			 * don't need to validate it when emitting
+			 * dummy queries in context destroy paths.
+			 */
 
-		vmw_bo_pin(dev_priv->dummy_query_bo, true);
-		dev_priv->dummy_query_bo_pinned = true;
+			vmw_bo_pin(dev_priv->dummy_query_bo, true);
+			dev_priv->dummy_query_bo_pinned = true;
 
-		dev_priv->query_cid = sw_context->cur_query_cid;
-		dev_priv->pinned_bo =
-			ttm_bo_reference(sw_context->cur_query_bo);
+			BUG_ON(sw_context->last_query_ctx == NULL);
+			dev_priv->query_cid = sw_context->last_query_ctx->id;
+			dev_priv->query_cid_valid = true;
+			dev_priv->pinned_bo =
+				ttm_bo_reference(sw_context->cur_query_bo);
+		}
 	}
 }
 
 /**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
  *
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
  *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
  */
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
-	struct list_head *list, *next;
-
-	list_for_each_safe(list, next, &sw_context->query_list) {
-		list_del_init(list);
-	}
-}
-
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
 				   SVGAGuestPtr *ptr,
@@ -479,6 +743,37 @@ out_no_reloc:
 	return ret;
 }
 
+/**
+ * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+			       struct vmw_sw_context *sw_context,
+			       SVGA3dCmdHeader *header)
+{
+	struct vmw_begin_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBeginQuery q;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_begin_query_cmd,
+			   header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->q.cid,
+				 NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
@@ -501,13 +796,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
-					  &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
 }
 
+/**
+ * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 			      struct vmw_sw_context *sw_context,
 			      SVGA3dCmdHeader *header)
@@ -518,7 +819,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 		SVGA3dCmdWaitForQuery q;
 	} *cmd;
 	int ret;
-	struct vmw_resource *ctx;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +832,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 		return ret;
 
 	vmw_dmabuf_unreference(&vmw_bo);
-
-	/*
-	 * This wait will act as a barrier for previous waits for this
-	 * context.
-	 */
-
-	ctx = sw_context->cur_ctx;
-	if (!list_empty(&ctx->query_head))
-		list_del_init(&ctx->query_head);
-
 	return 0;
 }
 
@@ -550,14 +840,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		       SVGA3dCmdHeader *header)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	struct vmw_surface *srf = NULL;
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
 	int ret;
-	struct vmw_resource *res;
 
 	cmd = container_of(header, struct vmw_dma_cmd, header);
 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +854,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	bo = &vmw_bo->base;
-	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
-					     cmd->dma.host.sid, &srf);
-	if (ret) {
-		DRM_ERROR("could not find surface\n");
-		goto out_no_reloc;
-	}
-
-	ret = vmw_surface_validate(dev_priv, srf);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter, &cmd->dma.host.sid,
+				NULL);
 	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Culd not validate surface.\n");
-		goto out_no_validate;
+		if (unlikely(ret != -ERESTARTSYS))
+			DRM_ERROR("could not find surface for DMA.\n");
+		goto out_no_surface;
 	}
 
-	/*
-	 * Patch command stream with device SID.
-	 */
-	cmd->dma.host.sid = srf->res.id;
-	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
-	vmw_dmabuf_unreference(&vmw_bo);
+	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-	res = &srf->res;
-	vmw_resource_to_validate_list(sw_context, &res);
+	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
 
-	return 0;
-
-out_no_validate:
-	vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
 }
@@ -629,8 +900,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
 	}
 
 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
-		ret = vmw_cmd_sid_check(dev_priv, sw_context,
-					&decl->array.surfaceId);
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&decl->array.surfaceId, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -644,8 +916,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
 
 	range = (SVGA3dPrimitiveRange *) decl;
 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
-		ret = vmw_cmd_sid_check(dev_priv, sw_context,
-					&range->indexArray.surfaceId);
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&range->indexArray.surfaceId, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -676,8 +949,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
 			continue;
 
-		ret = vmw_cmd_sid_check(dev_priv, sw_context,
-					&cur_state->value);
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cur_state->value, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -708,6 +982,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	return ret;
 }
 
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_cmd,
+			   header);
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return 0;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -781,16 +1083,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
-		    &vmw_cmd_blt_surf_screen_check)
+		    &vmw_cmd_blt_surf_screen_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1143,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 	int32_t cur_size = size;
 	int ret;
 
+	sw_context->buf_start = buf;
+
 	while (cur_size > 0) {
 		size = cur_size;
 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1176,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 
 	for (i = 0; i < sw_context->cur_reloc; ++i) {
 		reloc = &sw_context->relocs[i];
-		validate = &sw_context->val_bufs[reloc->index];
+		validate = &sw_context->val_bufs[reloc->index].base;
 		bo = validate->bo;
-		if (bo->mem.mem_type == TTM_PL_VRAM) {
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
 			reloc->location->offset += bo->offset;
 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
-		} else
+			break;
+		case VMW_PL_GMR:
 			reloc->location->gmrId = bo->mem.start;
+			break;
+		default:
+			BUG();
+		}
 	}
 	vmw_free_relocations(sw_context);
 }
 
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+	struct vmw_resource_val_node *val, *val_next;
+
+	/*
+	 * Drop references to resources held during command submission.
+	 */
+
+	list_for_each_entry_safe(val, val_next, list, head) {
+		list_del_init(&val->head);
+		vmw_resource_unreference(&val->res);
+		kfree(val);
+	}
+}
+
 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
-	struct ttm_validate_buffer *entry, *next;
-	struct vmw_resource *res, *res_next;
+	struct vmw_validate_buffer *entry, *next;
+	struct vmw_resource_val_node *val;
 
 	/*
 	 * Drop references to DMA buffers held during command submission.
 	 */
 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
-				 head) {
-		list_del(&entry->head);
-		vmw_dmabuf_validate_clear(entry->bo);
-		ttm_bo_unref(&entry->bo);
+				 base.head) {
+		list_del(&entry->base.head);
+		ttm_bo_unref(&entry->base.bo);
+		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
 		sw_context->cur_val_buf--;
 	}
 	BUG_ON(sw_context->cur_val_buf != 0);
 
-	/*
-	 * Drop references to resources held during command submission.
-	 */
-	vmw_resource_unreserve(&sw_context->resource_list);
-	list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
-				 validate_head) {
-		list_del_init(&res->validate_head);
-		vmw_resource_unreference(&res);
-	}
+	list_for_each_entry(val, &sw_context->resource_list, head)
+		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -947,11 +1275,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context)
 {
-	struct ttm_validate_buffer *entry;
+	struct vmw_validate_buffer *entry;
 	int ret;
 
-	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
-		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -1114,6 +1442,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 {
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_fence_obj *fence = NULL;
+	struct vmw_resource *error_resource;
+	struct list_head resource_list;
 	uint32_t handle;
 	void *cmd;
 	int ret;
@@ -1143,24 +1473,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		sw_context->kernel = true;
 
 	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
-	sw_context->cid_valid = false;
-	sw_context->sid_valid = false;
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
-	INIT_LIST_HEAD(&sw_context->query_list);
 	INIT_LIST_HEAD(&sw_context->resource_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
-	sw_context->cur_query_cid = dev_priv->query_cid;
-	sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+	sw_context->last_query_ctx = NULL;
+	sw_context->needs_post_query_barrier = false;
+	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
+	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (!sw_context->res_ht_initialized) {
+		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+		if (unlikely(ret != 0))
+			goto out_unlock;
+		sw_context->res_ht_initialized = true;
+	}
 
+	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
 
+	ret = vmw_resources_reserve(sw_context);
+	if (unlikely(ret != 0))
+		goto out_err;
+
 	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
@@ -1169,24 +1508,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	vmw_apply_relocations(sw_context);
+	ret = vmw_resources_validate(sw_context);
+	if (unlikely(ret != 0))
+		goto out_err;
 
 	if (throttle_us) {
 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 				   throttle_us);
 
 		if (unlikely(ret != 0))
-			goto out_throttle;
+			goto out_err;
 	}
 
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		ret = -ENOMEM;
-		goto out_throttle;
+		goto out_err;
 	}
 
+	vmw_apply_relocations(sw_context);
 	memcpy(cmd, kernel_commands, command_size);
+
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+
 	vmw_fifo_commit(dev_priv, command_size);
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1548,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
+	vmw_resource_list_unreserve(&sw_context->resource_list, false);
 	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
 				    (void *) fence);
 
+	if (unlikely(dev_priv->pinned_bo != NULL &&
+		     !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
 	vmw_clear_validations(sw_context);
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
 				    user_fence_rep, fence, handle);
@@ -1217,17 +1568,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		vmw_fence_obj_unreference(&fence);
 	}
 
+	list_splice_init(&sw_context->resource_list, &resource_list);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to
+	 * avoid deadlocks in resource destruction paths.
+	 */
+	vmw_resource_list_unreference(&resource_list);
+
 	return 0;
 
 out_err:
+	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
-out_throttle:
-	vmw_query_switch_backoff(sw_context);
 	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+	vmw_resource_list_unreserve(&sw_context->resource_list, true);
 	vmw_clear_validations(sw_context);
+	if (unlikely(dev_priv->pinned_bo != NULL &&
+		     !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 out_unlock:
+	list_splice_init(&sw_context->resource_list, &resource_list);
+	error_resource = sw_context->error_resource;
+	sw_context->error_resource = NULL;
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to
+	 * avoid deadlocks in resource destruction paths.
+	 */
+	vmw_resource_list_unreference(&resource_list);
+	if (unlikely(error_resource != NULL))
+		vmw_resource_unreference(&error_resource);
+
 	return ret;
 }
 
@@ -1252,13 +1626,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
 
 
 /**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
  * query bo.
  *
  * @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
  *
  * This function should be used to unpin the pinned query bo, or
  * as a query barrier when we need to make sure that all queries have
@@ -1271,23 +1645,21 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
  *
  * The function will synchronize on the previous query barrier, and will
  * thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
  */
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-				   bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+				     struct vmw_fence_obj *fence)
 {
 	int ret = 0;
 	struct list_head validate_list;
 	struct ttm_validate_buffer pinned_val, query_val;
-	struct vmw_fence_obj *fence;
-
-	mutex_lock(&dev_priv->cmdbuf_mutex);
+	struct vmw_fence_obj *lfence = NULL;
 
 	if (dev_priv->pinned_bo == NULL)
 		goto out_unlock;
 
-	if (only_on_cid_match && cid != dev_priv->query_cid)
-		goto out_unlock;
-
 	INIT_LIST_HEAD(&validate_list);
 
 	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
@@ -1308,25 +1680,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 		goto out_no_reserve;
 	}
 
-	ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
-	if (unlikely(ret != 0)) {
-		vmw_execbuf_unpin_panic(dev_priv);
-		goto out_no_emit;
+	if (dev_priv->query_cid_valid) {
+		BUG_ON(fence != NULL);
+		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+		if (unlikely(ret != 0)) {
+			vmw_execbuf_unpin_panic(dev_priv);
+			goto out_no_emit;
+		}
+		dev_priv->query_cid_valid = false;
 	}
 
 	vmw_bo_pin(dev_priv->pinned_bo, false);
 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
 	dev_priv->dummy_query_bo_pinned = false;
 
-	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	if (fence == NULL) {
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+						  NULL);
+		fence = lfence;
+	}
 	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+	if (lfence != NULL)
+		vmw_fence_obj_unreference(&lfence);
 
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
 	ttm_bo_unref(&dev_priv->pinned_bo);
 
 out_unlock:
-	mutex_unlock(&dev_priv->cmdbuf_mutex);
 	return;
 
 out_no_emit:
@@ -1335,6 +1716,31 @@ out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
 	ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	if (dev_priv->query_cid_valid)
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
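(Illustrative only, not part of the diff: a minimal sketch of how the two unpin
entry points above are meant to be called. The example_* wrappers are
hypothetical; only __vmw_execbuf_release_pinned_bo() and
vmw_execbuf_release_pinned_bo() come from this patch.)

static void example_unpin_after_barrier(struct vmw_private *dev_priv,
					struct vmw_fence_obj *barrier_fence)
{
	/*
	 * Caller already holds dev_priv->cmdbuf_mutex and has issued a
	 * query barrier; the barrier fence is passed in so that no dummy
	 * query needs to be emitted here.
	 */
	__vmw_execbuf_release_pinned_bo(dev_priv, barrier_fence);
}

static void example_unpin_standalone(struct vmw_private *dev_priv)
{
	/*
	 * Locking wrapper: takes cmdbuf_mutex itself and, if a query
	 * context is still valid, emits its own dummy query and fence.
	 */
	vmw_execbuf_release_pinned_bo(dev_priv);
}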
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b07ca2e..2f7c08e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -131,6 +131,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_rect *clips = NULL;
 	struct drm_mode_object *obj;
 	struct vmw_framebuffer *vfb;
+	struct vmw_resource *res;
 	uint32_t num_clips;
 	int ret;
 
@@ -178,11 +179,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_no_ttm_lock;
 
-	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
-					     &surface);
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+					      user_surface_converter,
+					      &res);
 	if (ret)
 		goto out_no_surface;
 
+	surface = vmw_res_to_srf(res);
 	ret = vmw_kms_present(dev_priv, file_priv,
 			      vfb, surface, arg->sid,
 			      arg->dest_x, arg->dest_y,
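(Likewise illustrative, not part of the diff: the converter-based lookup used
above generalizes to any user-exported resource type. A hypothetical context
lookup, with error handling abbreviated, would look like this;
user_context_converter is defined in vmwgfx_resource.c below.)

static int example_lookup_context(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_context_converter, &res);
	if (unlikely(ret != 0))
		return ret;	/* Bad handle or wrong resource type. */

	/* ... use the refcounted resource ... */

	vmw_resource_unreference(&res);
	return 0;
}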
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index ae675c6..06e2135 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -31,15 +31,47 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 
-struct vmw_user_context {
-	struct ttm_base_object base;
-	struct vmw_resource res;
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+	enum ttm_object_type object_type;
+	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+	void (*res_free) (struct vmw_resource *res);
 };
 
-struct vmw_user_surface {
-	struct ttm_base_object base;
-	struct vmw_surface srf;
-	uint32_t size;
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type:          Enum that identifies the lru list to use for eviction.
+ * @needs_backup:      Whether the resource is guest-backed and needs
+ *                     persistent buffer storage.
+ * @type_name:         String that identifies the resource type.
+ * @backup_placement:  TTM placement for backup buffers.
+ * @may_evict:         Whether the resource may be evicted.
+ * @create:            Create a hardware resource.
+ * @destroy:           Destroy a hardware resource.
+ * @bind:              Bind a hardware resource to persistent buffer storage.
+ * @unbind:            Unbind a hardware resource from persistent
+ *                     buffer storage.
+ */
+
+struct vmw_res_func {
+	enum vmw_res_type res_type;
+	bool needs_backup;
+	const char *type_name;
+	struct ttm_placement *backup_placement;
+	bool may_evict;
+
+	int (*create) (struct vmw_resource *res);
+	int (*destroy) (struct vmw_resource *res);
+	int (*bind) (struct vmw_resource *res,
+		     struct ttm_validate_buffer *val_buf);
+	int (*unbind) (struct vmw_resource *res,
+		       bool readback,
+		       struct ttm_validate_buffer *val_buf);
 };
 
 struct vmw_user_dma_buffer {
@@ -62,16 +94,118 @@ struct vmw_user_stream {
 	struct vmw_stream stream;
 };
 
+
+static uint64_t vmw_user_stream_size;
+
+static const struct vmw_res_func vmw_stream_func = {
+	.res_type = vmw_res_stream,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "video streams",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
+
+struct vmw_user_context {
+	struct ttm_base_object base;
+	struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+	.object_type = VMW_RES_CONTEXT,
+	.base_obj_to_res = vmw_user_context_base_to_res,
+	.res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+	&user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "legacy contexts",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
+
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base:           The TTM base object handling user-space visibility.
+ * @srf:            The surface metadata.
+ * @size:           TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+	struct ttm_base_object base;
+	struct vmw_surface srf;
+	uint32_t size;
+	uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face:           Surface face.
+ * @mip:            Mip level.
+ * @bo_offset:      Offset into backing store of this mip level.
+ *
+ */
 struct vmw_surface_offset {
 	uint32_t face;
 	uint32_t mip;
 	uint32_t bo_offset;
 };
 
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+	.object_type = VMW_RES_SURFACE,
+	.base_obj_to_res = vmw_user_surface_base_to_res,
+	.res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+	&user_surface_conv;
+
 
-static uint64_t vmw_user_context_size;
 static uint64_t vmw_user_surface_size;
-static uint64_t vmw_user_stream_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+	.res_type = vmw_res_surface,
+	.needs_backup = false,
+	.may_evict = true,
+	.type_name = "legacy surfaces",
+	.backup_placement = &vmw_srf_placement,
+	.create = &vmw_legacy_srf_create,
+	.destroy = &vmw_legacy_srf_destroy,
+	.bind = &vmw_legacy_srf_bind,
+	.unbind = &vmw_legacy_srf_unbind
+};
+
 
 static inline struct vmw_dma_buffer *
 vmw_dma_buffer(struct ttm_buffer_object *bo)
@@ -103,10 +237,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 static void vmw_resource_release_id(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
 	write_lock(&dev_priv->resource_lock);
 	if (res->id != -1)
-		idr_remove(res->idr, res->id);
+		idr_remove(idr, res->id);
 	res->id = -1;
 	write_unlock(&dev_priv->resource_lock);
 }
@@ -116,17 +251,36 @@ static void vmw_resource_release(struct kref *kref)
 	struct vmw_resource *res =
 	    container_of(kref, struct vmw_resource, kref);
 	struct vmw_private *dev_priv = res->dev_priv;
-	int id = res->id;
-	struct idr *idr = res->idr;
+	int id;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
 	res->avail = false;
-	if (res->remove_from_lists != NULL)
-		res->remove_from_lists(res);
+	list_del_init(&res->lru_head);
 	write_unlock(&dev_priv->resource_lock);
+	if (res->backup) {
+		struct ttm_buffer_object *bo = &res->backup->base;
+
+		ttm_bo_reserve(bo, false, false, false, 0);
+		if (!list_empty(&res->mob_head) &&
+		    res->func->unbind != NULL) {
+			struct ttm_validate_buffer val_buf;
+
+			val_buf.bo = bo;
+			val_buf.new_sync_obj_arg =
+				(void *)((unsigned long)
+					 DRM_VMW_FENCE_FLAG_EXEC);
+			res->func->unbind(res, false, &val_buf);
+		}
+		res->backup_dirty = false;
+		list_del_init(&res->mob_head);
+		ttm_bo_unreserve(bo);
+		vmw_dmabuf_unreference(&res->backup);
+	}
 
 	if (likely(res->hw_destroy != NULL))
 		res->hw_destroy(res);
 
+	id = res->id;
 	if (res->res_free != NULL)
 		res->res_free(res);
 	else
@@ -153,25 +307,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
 /**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
  *
- * @dev_priv: Pointer to the device private structure.
  * @res: Pointer to the resource.
  *
  * Allocate the lowest free resource from the resource manager, and set
  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
  */
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
-				 struct vmw_resource *res)
+static int vmw_resource_alloc_id(struct vmw_resource *res)
 {
+	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
 	BUG_ON(res->id != -1);
 
 	do {
-		if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
 			return -ENOMEM;
 
 		write_lock(&dev_priv->resource_lock);
-		ret = idr_get_new_above(res->idr, res, 1, &res->id);
+		ret = idr_get_new_above(idr, res, 1, &res->id);
 		write_unlock(&dev_priv->resource_lock);
 
 	} while (ret == -EAGAIN);
@@ -179,31 +333,40 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
 	return ret;
 }
 
-
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @res:            The struct vmw_resource to initialize.
+ * @delay_id:       Boolean whether to defer device id allocation until
+ *                  the first validation.
+ * @res_free:       Resource destructor.
+ * @func:           Resource function table.
+ */
 static int vmw_resource_init(struct vmw_private *dev_priv,
 			     struct vmw_resource *res,
-			     struct idr *idr,
-			     enum ttm_object_type obj_type,
 			     bool delay_id,
 			     void (*res_free) (struct vmw_resource *res),
-			     void (*remove_from_lists)
-			     (struct vmw_resource *res))
+			     const struct vmw_res_func *func)
 {
 	kref_init(&res->kref);
 	res->hw_destroy = NULL;
 	res->res_free = res_free;
-	res->remove_from_lists = remove_from_lists;
-	res->res_type = obj_type;
-	res->idr = idr;
 	res->avail = false;
 	res->dev_priv = dev_priv;
-	INIT_LIST_HEAD(&res->query_head);
-	INIT_LIST_HEAD(&res->validate_head);
+	res->func = func;
+	INIT_LIST_HEAD(&res->lru_head);
+	INIT_LIST_HEAD(&res->mob_head);
 	res->id = -1;
+	res->backup = NULL;
+	res->backup_offset = 0;
+	res->backup_dirty = false;
+	res->res_dirty = false;
 	if (delay_id)
 		return 0;
 	else
-		return vmw_resource_alloc_id(dev_priv, res);
+		return vmw_resource_alloc_id(res);
 }
 
 /**
@@ -218,7 +381,6 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
  * Activate basically means that the function vmw_resource_lookup will
  * find it.
  */
-
 static void vmw_resource_activate(struct vmw_resource *res,
 				  void (*hw_destroy) (struct vmw_resource *))
 {
@@ -263,8 +425,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	} *cmd;
 
 
-	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
+	vmw_execbuf_release_pinned_bo(dev_priv);
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
@@ -291,8 +452,8 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 		SVGA3dCmdDefineContext body;
 	} *cmd;
 
-	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
-				VMW_RES_CONTEXT, false, res_free, NULL);
+	ret = vmw_resource_init(dev_priv, res, false,
+				res_free, &vmw_legacy_context_func);
 
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to allocate a resource id.\n");
@@ -338,6 +499,7 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
 		return NULL;
 
 	ret = vmw_context_init(dev_priv, res, NULL);
+
 	return (ret == 0) ? res : NULL;
 }
 
@@ -345,6 +507,12 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
  * User-space context management:
  */
 
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
 static void vmw_user_context_free(struct vmw_resource *res)
 {
 	struct vmw_user_context *ctx =
@@ -375,32 +543,10 @@ static void vmw_user_context_base_release(struct ttm_base_object **p_base)
 int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_resource *res;
-	struct vmw_user_context *ctx;
 	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	int ret = 0;
-
-	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
-	if (unlikely(res == NULL))
-		return -EINVAL;
-
-	if (res->res_free != &vmw_user_context_free) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ctx = container_of(res, struct vmw_user_context, res);
-	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
-		ret = -EPERM;
-		goto out;
-	}
 
-	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
-	vmw_resource_unreference(&res);
-	return ret;
+	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
 int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -438,7 +584,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (unlikely(ctx == NULL)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_context_size);
@@ -467,7 +613,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 		goto out_err;
 	}
 
-	arg->cid = res->id;
+	arg->cid = ctx->base.hash.key;
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock:
@@ -476,30 +622,13 @@ out_unlock:
 
 }
 
-int vmw_context_check(struct vmw_private *dev_priv,
-		      struct ttm_object_file *tfile,
-		      int id,
-		      struct vmw_resource **p_res)
-{
-	struct vmw_resource *res;
-	int ret = 0;
-
-	read_lock(&dev_priv->resource_lock);
-	res = idr_find(&dev_priv->context_idr, id);
-	if (res && res->avail) {
-		struct vmw_user_context *ctx =
-			container_of(res, struct vmw_user_context, res);
-		if (ctx->base.tfile != tfile && !ctx->base.shareable)
-			ret = -EPERM;
-		if (p_res)
-			*p_res = vmw_resource_reference(res);
-	} else
-		ret = -EINVAL;
-	read_unlock(&dev_priv->resource_lock);
-
-	return ret;
-}
-
+/**
+ * struct vmw_bpp - Bits per pixel info for surface storage size computation.
+ *
+ * @bpp:         Bits per pixel.
+ * @s_bpp:       Stride bits per pixel. See definition below.
+ *
+ */
 struct vmw_bpp {
 	uint8_t bpp;
 	uint8_t s_bpp;
@@ -573,9 +702,8 @@ static const struct vmw_bpp vmw_sf_bpp[] = {
 
 
 /**
- * Surface management.
+ * struct vmw_surface_dma - SVGA3D DMA command
  */
-
 struct vmw_surface_dma {
 	SVGA3dCmdHeader header;
 	SVGA3dCmdSurfaceDMA body;
@@ -583,11 +711,17 @@ struct vmw_surface_dma {
 	SVGA3dCmdSurfaceDMASuffix suffix;
 };
 
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
 struct vmw_surface_define {
 	SVGA3dCmdHeader header;
 	SVGA3dCmdDefineSurface body;
 };
 
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
 struct vmw_surface_destroy {
 	SVGA3dCmdHeader header;
 	SVGA3dCmdDestroySurface body;
@@ -688,7 +822,6 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
 	}
 }
 
-
 /**
  * vmw_surface_dma_encode - Encode a surface_dma command.
  *
@@ -748,6 +881,15 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
 };
 
 
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res:        Pointer to a struct vmw_resource embedded in a struct
+ *              vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface if
+ * any, and adjusts accounting and resource count accordingly.
+ */
 static void vmw_hw_surface_destroy(struct vmw_resource *res)
 {
 
@@ -774,47 +916,30 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 		 */
 
 		mutex_lock(&dev_priv->cmdbuf_mutex);
-		srf = container_of(res, struct vmw_surface, res);
-		dev_priv->used_memory_size -= srf->backup_size;
+		srf = vmw_res_to_srf(res);
+		dev_priv->used_memory_size -= res->backup_size;
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
-
 	}
 	vmw_3d_resource_dec(dev_priv, false);
 }
 
-void vmw_surface_res_free(struct vmw_resource *res)
-{
-	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-	if (srf->backup)
-		ttm_bo_unref(&srf->backup);
-	kfree(srf->offsets);
-	kfree(srf->sizes);
-	kfree(srf->snooper.image);
-	kfree(srf);
-}
-
-
 /**
- * vmw_surface_do_validate - make a surface available to the device.
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
  *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
  *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
+ * If the surface doesn't have a hw id, allocate one.
  *
  * Returns -EBUSY if there wasn't sufficient device resources to
  * complete the validation. Retry after freeing up resources.
  *
  * May return other errors if the kernel is out of guest resources.
  */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
-			    struct vmw_surface *srf)
+static int vmw_legacy_srf_create(struct vmw_resource *res)
 {
-	struct vmw_resource *res = &srf->res;
-	struct list_head val_list;
-	struct ttm_validate_buffer val_buf;
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf;
 	uint32_t submit_size;
 	uint8_t *cmd;
 	int ret;
@@ -822,179 +947,180 @@ int vmw_surface_do_validate(struct vmw_private *dev_priv,
 	if (likely(res->id != -1))
 		return 0;
 
-	if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
+	srf = vmw_res_to_srf(res);
+	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
 		     dev_priv->memory_size))
 		return -EBUSY;
 
 	/*
-	 * Reserve- and validate the backup DMA bo.
-	 */
-
-	if (srf->backup) {
-		INIT_LIST_HEAD(&val_list);
-		val_buf.bo = ttm_bo_reference(srf->backup);
-		val_buf.new_sync_obj_arg = (void *)((unsigned long)
-						    DRM_VMW_FENCE_FLAG_EXEC);
-		list_add_tail(&val_buf.head, &val_list);
-		ret = ttm_eu_reserve_buffers(&val_list);
-		if (unlikely(ret != 0))
-			goto out_no_reserve;
-
-		ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-				      true, false, false);
-		if (unlikely(ret != 0))
-			goto out_no_validate;
-	}
-
-	/*
 	 * Alloc id for the resource.
 	 */
 
-	ret = vmw_resource_alloc_id(dev_priv, res);
+	ret = vmw_resource_alloc_id(res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to allocate a surface id.\n");
 		goto out_no_id;
 	}
+
 	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
 		ret = -EBUSY;
 		goto out_no_fifo;
 	}
 
-
 	/*
-	 * Encode surface define- and dma commands.
+	 * Encode the surface define command.
 	 */
 
 	submit_size = vmw_surface_define_size(srf);
-	if (srf->backup)
-		submit_size += vmw_surface_dma_size(srf);
-
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
-			  "validation.\n");
+			  "creation.\n");
 		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
 
 	vmw_surface_define_encode(srf, cmd);
-	if (srf->backup) {
-		SVGAGuestPtr ptr;
-
-		cmd += vmw_surface_define_size(srf);
-		vmw_bo_get_guest_ptr(srf->backup, &ptr);
-		vmw_surface_dma_encode(srf, cmd, &ptr, true);
-	}
-
 	vmw_fifo_commit(dev_priv, submit_size);
-
-	/*
-	 * Create a fence object and fence the backup buffer.
-	 */
-
-	if (srf->backup) {
-		struct vmw_fence_obj *fence;
-
-		(void) vmw_execbuf_fence_commands(NULL, dev_priv,
-						  &fence, NULL);
-		ttm_eu_fence_buffer_objects(&val_list, fence);
-		if (likely(fence != NULL))
-			vmw_fence_obj_unreference(&fence);
-		ttm_bo_unref(&val_buf.bo);
-		ttm_bo_unref(&srf->backup);
-	}
-
 	/*
 	 * Surface memory usage accounting.
 	 */
 
-	dev_priv->used_memory_size += srf->backup_size;
-
+	dev_priv->used_memory_size += res->backup_size;
 	return 0;
 
 out_no_fifo:
 	vmw_resource_release_id(res);
 out_no_id:
-out_no_validate:
-	if (srf->backup)
-		ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-	if (srf->backup)
-		ttm_bo_unref(&val_buf.bo);
 	return ret;
 }
 
 /**
- * vmw_surface_evict - Evict a hw surface.
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
  *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ * @bind:           Boolean whether to DMA to the surface.
  *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
  */
-int vmw_surface_evict(struct vmw_private *dev_priv,
-		      struct vmw_surface *srf)
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf,
+			      bool bind)
 {
-	struct vmw_resource *res = &srf->res;
-	struct list_head val_list;
-	struct ttm_validate_buffer val_buf;
+	SVGAGuestPtr ptr;
+	struct vmw_fence_obj *fence;
 	uint32_t submit_size;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
 	uint8_t *cmd;
-	int ret;
-	struct vmw_fence_obj *fence;
-	SVGAGuestPtr ptr;
-
-	BUG_ON(res->id == -1);
+	struct vmw_private *dev_priv = res->dev_priv;
 
-	/*
-	 * Create a surface backup buffer object.
-	 */
+	BUG_ON(val_buf->bo == NULL);
 
-	if (!srf->backup) {
-		ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
-				    ttm_bo_type_device,
-				    &vmw_srf_placement, 0, 0, true,
-				    NULL, &srf->backup);
-		if (unlikely(ret != 0))
-			return ret;
+	submit_size = vmw_surface_dma_size(srf);
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "DMA.\n");
+		return -ENOMEM;
 	}
+	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+	vmw_fifo_commit(dev_priv, submit_size);
 
 	/*
-	 * Reserve- and validate the backup DMA bo.
+	 * Create a fence object and fence the backup buffer.
 	 */
 
-	INIT_LIST_HEAD(&val_list);
-	val_buf.bo = ttm_bo_reference(srf->backup);
-	val_buf.new_sync_obj_arg = (void *)(unsigned long)
-		DRM_VMW_FENCE_FLAG_EXEC;
-	list_add_tail(&val_buf.head, &val_list);
-	ret = ttm_eu_reserve_buffers(&val_list);
-	if (unlikely(ret != 0))
-		goto out_no_reserve;
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
 
-	ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-			      true, false, false);
-	if (unlikely(ret != 0))
-		goto out_no_validate;
+	vmw_fence_single_bo(val_buf->bo, fence,
+			    val_buf->new_sync_obj_arg);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ *                       surface validation process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	if (!res->backup_dirty)
+		return 0;
+
+	return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ *                         surface eviction process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @readback:       Whether to copy the surface contents back to the
+ *                  backup buffer before unbinding.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data from the surface if @readback is true.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	if (unlikely(readback))
+		return vmw_legacy_srf_dma(res, val_buf, false);
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ *                          resource eviction process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	uint32_t submit_size;
+	uint8_t *cmd;
 
+	BUG_ON(res->id == -1);
 
 	/*
 	 * Encode the surface destroy command.
 	 */
 
-	submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
+	submit_size = vmw_surface_destroy_size();
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "eviction.\n");
-		ret = -ENOMEM;
-		goto out_no_fifo;
+		return -ENOMEM;
 	}
 
-	vmw_bo_get_guest_ptr(srf->backup, &ptr);
-	vmw_surface_dma_encode(srf, cmd, &ptr, false);
-	cmd += vmw_surface_dma_size(srf);
 	vmw_surface_destroy_encode(res->id, cmd);
 	vmw_fifo_commit(dev_priv, submit_size);
 
@@ -1002,18 +1128,7 @@ int vmw_surface_evict(struct vmw_private *dev_priv,
 	 * Surface memory usage accounting.
 	 */
 
-	dev_priv->used_memory_size -= srf->backup_size;
-
-	/*
-	 * Create a fence object and fence the DMA buffer.
-	 */
-
-	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
-					  &fence, NULL);
-	ttm_eu_fence_buffer_objects(&val_list, fence);
-	if (likely(fence != NULL))
-		vmw_fence_obj_unreference(&fence);
-	ttm_bo_unref(&val_buf.bo);
+	dev_priv->used_memory_size -= res->backup_size;
 
 	/*
 	 * Release the surface ID.
@@ -1022,128 +1137,72 @@ int vmw_surface_evict(struct vmw_private *dev_priv,
 	vmw_resource_release_id(res);
 
 	return 0;
-
-out_no_fifo:
-out_no_validate:
-	if (srf->backup)
-		ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-	ttm_bo_unref(&val_buf.bo);
-	ttm_bo_unref(&srf->backup);
-	return ret;
 }
 
 
 /**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
+ * vmw_surface_init - initialize a struct vmw_surface
  *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
+ * @dev_priv:       Pointer to a device private struct.
+ * @srf:            Pointer to the struct vmw_surface to initialize.
+ * @res_free:       Pointer to a resource destructor used to free
+ *                  the object.
  */
-int vmw_surface_validate(struct vmw_private *dev_priv,
-			 struct vmw_surface *srf)
+static int vmw_surface_init(struct vmw_private *dev_priv,
+			    struct vmw_surface *srf,
+			    void (*res_free) (struct vmw_resource *res))
 {
 	int ret;
-	struct vmw_surface *evict_srf;
+	struct vmw_resource *res = &srf->res;
 
-	do {
-		write_lock(&dev_priv->resource_lock);
-		list_del_init(&srf->lru_head);
-		write_unlock(&dev_priv->resource_lock);
+	BUG_ON(res_free == NULL);
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				&vmw_legacy_surface_func);
 
-		ret = vmw_surface_do_validate(dev_priv, srf);
-		if (likely(ret != -EBUSY))
-			break;
+	if (unlikely(ret != 0)) {
+		vmw_3d_resource_dec(dev_priv, false);
+		res_free(res);
+		return ret;
+	}
 
-		write_lock(&dev_priv->resource_lock);
-		if (list_empty(&dev_priv->surface_lru)) {
-			DRM_ERROR("Out of device memory for surfaces.\n");
-			ret = -EBUSY;
-			write_unlock(&dev_priv->resource_lock);
-			break;
-		}
-
-		evict_srf = vmw_surface_reference
-			(list_first_entry(&dev_priv->surface_lru,
-					  struct vmw_surface,
-					  lru_head));
-		list_del_init(&evict_srf->lru_head);
-
-		write_unlock(&dev_priv->resource_lock);
-		(void) vmw_surface_evict(dev_priv, evict_srf);
-
-		vmw_surface_unreference(&evict_srf);
-
-	} while (1);
-
-	if (unlikely(ret != 0 && srf->res.id != -1)) {
-		write_lock(&dev_priv->resource_lock);
-		list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
-		write_unlock(&dev_priv->resource_lock);
-	}
+	/*
+	 * The surface won't be visible to hardware until a
+	 * surface validate.
+	 */
 
+	vmw_resource_activate(res, vmw_hw_surface_destroy);
 	return ret;
 }
 
-
 /**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ *                                user visible surfaces
  *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
+ * @base:           Pointer to a TTM base object
  *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
  */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
 {
-	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-	list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
-		     struct vmw_surface *srf,
-		     void (*res_free) (struct vmw_resource *res))
-{
-	int ret;
-	struct vmw_resource *res = &srf->res;
-
-	BUG_ON(res_free == NULL);
-	INIT_LIST_HEAD(&srf->lru_head);
-	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
-				VMW_RES_SURFACE, true, res_free,
-				vmw_surface_remove_from_lists);
-
-	if (unlikely(ret != 0))
-		res_free(res);
-
-	/*
-	 * The surface won't be visible to hardware until a
-	 * surface validate.
-	 */
-
-	(void) vmw_3d_resource_inc(dev_priv, false);
-	vmw_resource_activate(res, vmw_hw_surface_destroy);
-	return ret;
+	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
 }
 
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res:            A struct vmw_resource embedded in a struct vmw_surface.
+ */
 static void vmw_user_surface_free(struct vmw_resource *res)
 {
-	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+	struct vmw_surface *srf = vmw_res_to_srf(res);
 	struct vmw_user_surface *user_srf =
 	    container_of(srf, struct vmw_user_surface, srf);
 	struct vmw_private *dev_priv = srf->res.dev_priv;
 	uint32_t size = user_srf->size;
 
-	if (srf->backup)
-		ttm_bo_unref(&srf->backup);
 	kfree(srf->offsets);
 	kfree(srf->sizes);
 	kfree(srf->snooper.image);
@@ -1152,108 +1211,14 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 }
 
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
+ * vmw_user_surface_base_release - User surface TTM base object destructor
  *
- * @list_head: list of resources to unreserve.
+ * @p_base:         Pointer to a pointer to a TTM base object
+ *                  embedded in a struct vmw_user_surface.
  *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
+ * Drops the base object's reference on its resource, and sets the
+ * pointer pointed to by @p_base to NULL.
  */
-void vmw_resource_unreserve(struct list_head *list)
-{
-	struct vmw_resource *res;
-	struct vmw_surface *srf;
-	rwlock_t *lock = NULL;
-
-	list_for_each_entry(res, list, validate_head) {
-
-		if (res->res_free != &vmw_surface_res_free &&
-		    res->res_free != &vmw_user_surface_free)
-			continue;
-
-		if (unlikely(lock == NULL)) {
-			lock = &res->dev_priv->resource_lock;
-			write_lock(lock);
-		}
-
-		srf = container_of(res, struct vmw_surface, res);
-		list_del_init(&srf->lru_head);
-		list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
-	}
-
-	if (lock != NULL)
-		write_unlock(lock);
-}
-
-/**
- * Helper function that looks either a surface or dmabuf.
- *
- * The pointer this pointed at by out_surf and out_buf needs to be null.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-			   struct ttm_object_file *tfile,
-			   uint32_t handle,
-			   struct vmw_surface **out_surf,
-			   struct vmw_dma_buffer **out_buf)
-{
-	int ret;
-
-	BUG_ON(*out_surf || *out_buf);
-
-	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
-	if (!ret)
-		return 0;
-
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
-	return ret;
-}
-
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-				   struct ttm_object_file *tfile,
-				   uint32_t handle, struct vmw_surface **out)
-{
-	struct vmw_resource *res;
-	struct vmw_surface *srf;
-	struct vmw_user_surface *user_srf;
-	struct ttm_base_object *base;
-	int ret = -EINVAL;
-
-	base = ttm_base_object_lookup(tfile, handle);
-	if (unlikely(base == NULL))
-		return -EINVAL;
-
-	if (unlikely(base->object_type != VMW_RES_SURFACE))
-		goto out_bad_resource;
-
-	user_srf = container_of(base, struct vmw_user_surface, base);
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	read_lock(&dev_priv->resource_lock);
-
-	if (!res->avail || res->res_free != &vmw_user_surface_free) {
-		read_unlock(&dev_priv->resource_lock);
-		goto out_bad_resource;
-	}
-
-	kref_get(&res->kref);
-	read_unlock(&dev_priv->resource_lock);
-
-	*out = srf;
-	ret = 0;
-
-out_bad_resource:
-	ttm_base_object_unref(&base);
-
-	return ret;
-}
-
 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 {
 	struct ttm_base_object *base = *p_base;
@@ -1265,6 +1230,14 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	vmw_resource_unreference(&res);
 }
 
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ *                                  the user surface destroy functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
@@ -1274,6 +1247,14 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
 }
 
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ *                                  the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
@@ -1329,7 +1310,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
 	if (unlikely(user_srf == NULL)) {
 		ret = -ENOMEM;
 		goto out_no_user_srf;
@@ -1341,7 +1322,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	srf->flags = req->flags;
 	srf->format = req->format;
 	srf->scanout = req->scanout;
-	srf->backup = NULL;
 
 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
 	srf->num_sizes = num_sizes;
@@ -1369,6 +1349,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_no_copy;
 	}
 
+	srf->base_size = *srf->sizes;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->multisample_count = 1;
+
 	cur_bo_offset = 0;
 	cur_offset = srf->offsets;
 	cur_size = srf->sizes;
@@ -1390,7 +1374,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 			++cur_size;
 		}
 	}
-	srf->backup_size = cur_bo_offset;
+	res->backup_size = cur_bo_offset;
 
 	if (srf->scanout &&
 	    srf->num_sizes == 1 &&
@@ -1434,9 +1418,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 
 	rep->sid = user_srf->base.hash.key;
-	if (rep->sid == SVGA3D_INVALID_ID)
-		DRM_ERROR("Created bad Surface ID.\n");
-
 	vmw_resource_unreference(&res);
 
 	ttm_read_unlock(&vmaster->lock);
@@ -1454,6 +1435,14 @@ out_unlock:
 	return ret;
 }
 
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ *                                  the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
@@ -1507,33 +1496,84 @@ out_no_reference:
 	return ret;
 }
 
-int vmw_surface_check(struct vmw_private *dev_priv,
-		      struct ttm_object_file *tfile,
-		      uint32_t handle, int *id)
+/**
+ * vmw_user_resource_lookup_handle - lookup a struct vmw_resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ * @p_res:        On successful return the location pointed to will contain
+ *                a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
+ */
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+				    struct ttm_object_file *tfile,
+				    uint32_t handle,
+				    const struct vmw_user_resource_conv
+				    *converter,
+				    struct vmw_resource **p_res)
 {
 	struct ttm_base_object *base;
-	struct vmw_user_surface *user_srf;
-
-	int ret = -EPERM;
+	struct vmw_resource *res;
+	int ret = -EINVAL;
 
 	base = ttm_base_object_lookup(tfile, handle);
 	if (unlikely(base == NULL))
 		return -EINVAL;
 
-	if (unlikely(base->object_type != VMW_RES_SURFACE))
-		goto out_bad_surface;
+	if (unlikely(base->object_type != converter->object_type))
+		goto out_bad_resource;
 
-	user_srf = container_of(base, struct vmw_user_surface, base);
-	*id = user_srf->srf.res.id;
-	ret = 0;
+	res = converter->base_obj_to_res(base);
 
-out_bad_surface:
-	/**
-	 * FIXME: May deadlock here when called from the
-	 * command parsing code.
-	 */
+	read_lock(&dev_priv->resource_lock);
+	if (!res->avail || res->res_free != converter->res_free) {
+		read_unlock(&dev_priv->resource_lock);
+		goto out_bad_resource;
+	}
+
+	kref_get(&res->kref);
+	read_unlock(&dev_priv->resource_lock);
+
+	*p_res = res;
+	ret = 0;
 
+out_bad_resource:
 	ttm_base_object_unref(&base);
+
+	return ret;
+}
+
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by @out_surf and @out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+			   struct ttm_object_file *tfile,
+			   uint32_t handle,
+			   struct vmw_surface **out_surf,
+			   struct vmw_dma_buffer **out_buf)
+{
+	struct vmw_resource *res;
+	int ret;
+
+	BUG_ON(*out_surf || *out_buf);
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+					      user_surface_converter,
+					      &res);
+	if (!ret) {
+		*out_surf = vmw_res_to_srf(res);
+		return 0;
+	}
+
+	*out_surf = NULL;
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
 	return ret;
 }
 
@@ -1562,7 +1602,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-	INIT_LIST_HEAD(&vmw_bo->validate_list);
+	INIT_LIST_HEAD(&vmw_bo->res_list);
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
@@ -1594,6 +1634,59 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	ttm_bo_unref(&bo);
 }
 
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+			  struct ttm_object_file *tfile,
+			  uint32_t size,
+			  bool shareable,
+			  uint32_t *handle,
+			  struct vmw_dma_buffer **p_dma_buf)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_buffer_object *tmp;
+	int ret;
+
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(user_bo == NULL)) {
+		DRM_ERROR("Failed to allocate a buffer.\n");
+		return -ENOMEM;
+	}
+
+	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      &vmw_vram_sys_placement, true,
+			      &vmw_user_dmabuf_destroy);
+	if (unlikely(ret != 0))
+		return ret;
+
+	tmp = ttm_bo_reference(&user_bo->dma.base);
+	ret = ttm_base_object_init(tfile,
+				   &user_bo->base,
+				   shareable,
+				   ttm_buffer_type,
+				   &vmw_user_dmabuf_release, NULL);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unref(&tmp);
+		goto out_no_base_object;
+	}
+
+	*p_dma_buf = &user_bo->dma;
+	*handle = user_bo->base.hash.key;
+
+out_no_base_object:
+	return ret;
+}
+
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
@@ -1602,44 +1695,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	    (union drm_vmw_alloc_dmabuf_arg *)data;
 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-	struct vmw_user_dma_buffer *vmw_user_bo;
-	struct ttm_buffer_object *tmp;
+	struct vmw_dma_buffer *dma_buf;
+	uint32_t handle;
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-	if (unlikely(vmw_user_bo == NULL))
-		return -ENOMEM;
-
 	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0)) {
-		kfree(vmw_user_bo);
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
-	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
-			      &vmw_vram_sys_placement, true,
-			      &vmw_user_dmabuf_destroy);
+	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    req->size, false, &handle, &dma_buf);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
-	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-				   &vmw_user_bo->base,
-				   false,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
-	if (unlikely(ret != 0))
-		goto out_no_base_object;
-	else {
-		rep->handle = vmw_user_bo->base.hash.key;
-		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
-		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
-		rep->cur_gmr_offset = 0;
-	}
+	rep->handle = handle;
+	rep->map_handle = dma_buf->base.addr_space_offset;
+	rep->cur_gmr_id = handle;
+	rep->cur_gmr_offset = 0;
+
+	vmw_dmabuf_unreference(&dma_buf);
 
-out_no_base_object:
-	ttm_bo_unref(&tmp);
 out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 
@@ -1657,27 +1733,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 					 TTM_REF_USAGE);
 }
 
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-				  uint32_t cur_validate_node)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-	if (likely(vmw_bo->on_validate_list))
-		return vmw_bo->cur_validate_node;
-
-	vmw_bo->cur_validate_node = cur_validate_node;
-	vmw_bo->on_validate_list = true;
-
-	return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-	vmw_bo->on_validate_list = false;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 			   uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -1706,6 +1761,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	return 0;
 }
 
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+			      struct vmw_dma_buffer *dma_buf)
+{
+	struct vmw_user_dma_buffer *user_bo;
+
+	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+		return -EINVAL;
+
+	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
+
 /*
  * Stream management
  */
@@ -1730,8 +1797,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
 	struct vmw_resource *res = &stream->res;
 	int ret;
 
-	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
-				VMW_RES_STREAM, false, res_free, NULL);
+	ret = vmw_resource_init(dev_priv, res, false, res_free,
+				&vmw_stream_func);
 
 	if (unlikely(ret != 0)) {
 		if (res_free == NULL)
@@ -1753,10 +1820,6 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
 	return 0;
 }
 
-/**
- * User-space context management:
- */
-
 static void vmw_user_stream_free(struct vmw_resource *res)
 {
 	struct vmw_user_stream *stream =
@@ -1792,9 +1855,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 	struct vmw_user_stream *stream;
 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
 	int ret = 0;
 
-	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
 	if (unlikely(res == NULL))
 		return -EINVAL;
 
@@ -1895,7 +1960,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 	struct vmw_resource *res;
 	int ret;
 
-	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+				  *inout_id);
 	if (unlikely(res == NULL))
 		return -EINVAL;
 
@@ -1990,3 +2056,461 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 					 handle, TTM_REF_USAGE);
 }
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+				  bool interruptible)
+{
+	unsigned long size =
+		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	struct vmw_dma_buffer *backup;
+	int ret;
+
+	if (likely(res->backup)) {
+		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+		return 0;
+	}
+
+	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+	if (unlikely(backup == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+			      res->func->backup_placement,
+			      interruptible,
+			      &vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto out_no_dmabuf;
+
+	res->backup = backup;
+
+out_no_dmabuf:
+	return ret;
+}
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ *                            to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ * @val_buf:        Information about a buffer possibly
+ *                  containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and the
+ * call should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+				    struct ttm_validate_buffer *val_buf)
+{
+	int ret = 0;
+	const struct vmw_res_func *func = res->func;
+
+	if (unlikely(res->id == -1)) {
+		ret = func->create(res);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	if (func->bind &&
+	    ((func->needs_backup && list_empty(&res->mob_head) &&
+	      val_buf->bo != NULL) ||
+	     (!func->needs_backup && val_buf->bo != NULL))) {
+		ret = func->bind(res, val_buf);
+		if (unlikely(ret != 0))
+			goto out_bind_failed;
+		if (func->needs_backup)
+			list_add_tail(&res->mob_head, &res->backup->res_list);
+	}
+
+	/*
+	 * Only do this on write operations, and move to
+	 * vmw_resource_unreserve if it can be called after
+	 * backup buffers have been unreserved. Otherwise
+	 * sort out locking.
+	 */
+	res->res_dirty = true;
+
+	return 0;
+
+out_bind_failed:
+	func->destroy(res);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @new_backup:        Pointer to new backup buffer if command submission
+ *                     switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+			    struct vmw_dma_buffer *new_backup,
+			    unsigned long new_backup_offset)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (!list_empty(&res->lru_head))
+		return;
+
+	if (new_backup && new_backup != res->backup) {
+
+		if (res->backup) {
+			BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+			list_del_init(&res->mob_head);
+			vmw_dmabuf_unreference(&res->backup);
+		}
+
+		res->backup = vmw_dmabuf_reference(new_backup);
+		BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+		list_add_tail(&res->mob_head, &new_backup->res_list);
+	}
+	if (new_backup)
+		res->backup_offset = new_backup_offset;
+
+	if (!res->func->may_evict)
+		return;
+
+	write_lock(&dev_priv->resource_lock);
+	list_add_tail(&res->lru_head,
+		      &res->dev_priv->res_lru[res->func->res_type]);
+	write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ *                             for a resource and in that case, allocate
+ *                             one, reserve and validate it.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ * @val_buf:        On successful return contains data about the
+ *                  reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+			      bool interruptible,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct list_head val_list;
+	bool backup_dirty = false;
+	int ret;
+
+	if (unlikely(res->backup == NULL)) {
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	INIT_LIST_HEAD(&val_list);
+	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	val_buf->new_sync_obj_arg =  (void *)((unsigned long)
+					      DRM_VMW_FENCE_FLAG_EXEC);
+	list_add_tail(&val_buf->head, &val_list);
+	ret = ttm_eu_reserve_buffers(&val_list);
+	if (unlikely(ret != 0))
+		goto out_no_reserve;
+
+	if (res->func->needs_backup && list_empty(&res->mob_head))
+		return 0;
+
+	backup_dirty = res->backup_dirty;
+	ret = ttm_bo_validate(&res->backup->base,
+			      res->func->backup_placement,
+			      true, false, false);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+
+	return 0;
+
+out_no_validate:
+	ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+	ttm_bo_unref(&val_buf->bo);
+	if (backup_dirty)
+		vmw_dmabuf_unreference(&res->backup);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res:            The resource to reserve.
+ * @no_backup:      Whether to skip allocating a backup buffer even if
+ *                  the resource type needs one.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	write_lock(&dev_priv->resource_lock);
+	list_del_init(&res->lru_head);
+	write_unlock(&dev_priv->resource_lock);
+
+	if (res->func->needs_backup && res->backup == NULL &&
+	    !no_backup) {
+		ret = vmw_resource_buf_alloc(res, true);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ *                                    backup buffer
+ *
+ * @val_buf:        Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+	struct list_head val_list;
+
+	if (likely(val_buf->bo == NULL))
+		return;
+
+	INIT_LIST_HEAD(&val_list);
+	list_add_tail(&val_buf->head, &val_list);
+	ttm_eu_backoff_reservation(&val_list);
+	ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ *                         to a backup buffer.
+ *
+ * @res:            The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+	struct ttm_validate_buffer val_buf;
+	const struct vmw_res_func *func = res->func;
+	int ret;
+
+	BUG_ON(!func->may_evict);
+
+	val_buf.bo = NULL;
+	ret = vmw_resource_check_buffer(res, true, &val_buf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(func->unbind != NULL &&
+		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
+		ret = func->unbind(res, res->res_dirty, &val_buf);
+		if (unlikely(ret != 0))
+			goto out_no_unbind;
+		list_del_init(&res->mob_head);
+	}
+	ret = func->destroy(res);
+	res->backup_dirty = true;
+	res->res_dirty = false;
+out_no_unbind:
+	vmw_resource_backoff_reservation(&val_buf);
+
+	return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ *                         to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+	int ret;
+	struct vmw_resource *evict_res;
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+	struct ttm_validate_buffer val_buf;
+
+	if (likely(!res->func->may_evict))
+		return 0;
+
+	val_buf.bo = NULL;
+	if (res->backup) {
+		val_buf.bo = &res->backup->base;
+		val_buf.new_sync_obj_arg = (void *)(unsigned long)
+			DRM_VMW_FENCE_FLAG_EXEC;
+	}
+	do {
+		ret = vmw_resource_do_validate(res, &val_buf);
+		if (likely(ret != -EBUSY))
+			break;
+
+		write_lock(&dev_priv->resource_lock);
+		if (list_empty(lru_list) || !res->func->may_evict) {
+			DRM_ERROR("Out of device id entries "
+				  "for %s.\n", res->func->type_name);
+			ret = -EBUSY;
+			write_unlock(&dev_priv->resource_lock);
+			break;
+		}
+
+		evict_res = vmw_resource_reference
+			(list_first_entry(lru_list, struct vmw_resource,
+					  lru_head));
+		list_del_init(&evict_res->lru_head);
+
+		write_unlock(&dev_priv->resource_lock);
+		vmw_resource_do_evict(evict_res);
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+	else if (!res->func->needs_backup && res->backup) {
+		list_del_init(&res->mob_head);
+		vmw_dmabuf_unreference(&res->backup);
+	}
+
+	return 0;
+
+out_no_validate:
+	return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ * @sync_obj_arg:   Fence flags cast to a void *.
+ *
+ * Unlike the ttm_eu version of this function, this one takes only a
+ * single buffer object instead of a list, and it does not unreserve
+ * the buffer object; that needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+			 struct vmw_fence_obj *fence,
+			 void *sync_obj_arg)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct vmw_fence_obj *old_fence_obj;
+	struct vmw_private *dev_priv =
+		container_of(bdev, struct vmw_private, bdev);
+
+	if (fence == NULL)
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	else
+		driver->sync_obj_ref(fence);
+
+	spin_lock(&bdev->fence_lock);
+
+	old_fence_obj = bo->sync_obj;
+	bo->sync_obj = fence;
+	bo->sync_obj_arg = sync_obj_arg;
+
+	spin_unlock(&bdev->fence_lock);
+
+	if (old_fence_obj)
+		vmw_fence_obj_unreference(&old_fence_obj);
+}
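
A usage sketch (illustration only, not part of the patch, assuming this
file's includes): fencing a single reserved buffer after the commands
referencing it have been committed, letting the helper create the fence by
passing NULL. The example_* wrapper is hypothetical; the sync_obj_arg
value mirrors the DRM_VMW_FENCE_FLAG_EXEC usage above:

/* Hypothetical call site; assumes @bo is reserved. */
static void example_fence_and_unreserve(struct ttm_buffer_object *bo)
{
	/* fence == NULL: a fence is inserted into the command stream
	 * through vmw_execbuf_fence_commands(). */
	vmw_fence_single_bo(bo, NULL,
			    (void *) (unsigned long) DRM_VMW_FENCE_FLAG_EXEC);

	/* vmw_fence_single_bo() intentionally leaves the buffer
	 * reserved; drop the reservation separately. */
	ttm_bo_unreserve(bo);
}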
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to move.
+ * @mem:            The struct ttm_mem_reg indicating to what memory
+ *                  region the move is taking place.
+ *
+ * For now, this callback does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+			      struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res:            The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+	return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv:       Pointer to a device private struct
+ * @type:           The resource type to evict
+ *
+ * To avoid starvation due to eviction thrashing, or as part of the
+ * hibernation sequence, evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+				    enum vmw_res_type type)
+{
+	struct list_head *lru_list = &dev_priv->res_lru[type];
+	struct vmw_resource *evict_res;
+
+	do {
+		write_lock(&dev_priv->resource_lock);
+
+		if (list_empty(lru_list))
+			goto out_unlock;
+
+		evict_res = vmw_resource_reference(
+			list_first_entry(lru_list, struct vmw_resource,
+					 lru_head));
+		list_del_init(&evict_res->lru_head);
+		write_unlock(&dev_priv->resource_lock);
+		vmw_resource_do_evict(evict_res);
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+out_unlock:
+	write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv:       Pointer to a device private struct
+ *
+ * To avoid starvation due to eviction thrashing, or as part of the
+ * hibernation sequence, evict all evictable resources. In particular,
+ * this means that all guest-backed resources that are registered with
+ * the device are evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+	enum vmw_res_type type;
+
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	for (type = 0; type < vmw_res_max; ++type)
+		vmw_resource_evict_type(dev_priv, type);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
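
For context, a sketch of the intended call site (illustration only, not
part of the patch): the wrapper below and its placement in the
suspend/hibernation path are hypothetical; only the
vmw_resource_evict_all() call itself comes from this patch:

/* Hypothetical caller: flush all evictable resources, and thereby their
 * device bindings, before the device is put to sleep. */
static void example_prepare_hibernation(struct vmw_private *dev_priv)
{
	/* Takes cmdbuf_mutex internally; must not be called with it held. */
	vmw_resource_evict_all(dev_priv);
}
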
-- 
1.7.4.4