[PATCH 26/33] drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf

Thomas Hellstrom thellstrom at vmware.com
Thu Jan 16 12:39:14 PST 2014


The device is no longer capable of scrubbing context bindings of resources
that are still bound when those resources are destroyed. Therefore, track
context bindings in the driver and scrub them when exiting execbuf.

Signed-off-by: Thomas Hellstrom <thellstrom at vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob at vmware.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 216 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |  65 ++++++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |  95 ++++++++++++--
 3 files changed, 365 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 308e78f..b4de756 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -34,6 +34,10 @@ struct vmw_user_context {
 	struct vmw_resource res;
 };
 
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -45,6 +49,9 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
 
 static uint64_t vmw_user_context_size;
 
@@ -82,6 +89,11 @@ static const struct vmw_res_func vmw_gb_context_func = {
 	.unbind = vmw_gb_context_unbind
 };
 
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
 /**
  * Context management:
  */
@@ -494,3 +506,207 @@ out_unlock:
 	return ret;
 
 }
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.shader_type;
+	cmd->body.shid = SVGA3D_INVALID_ID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for render target "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.rt_type;
+	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for texture "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = bi->i1.texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+	list_del(&cb->ctx_list);
+	cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_ctx_binding *loc;
+
+	switch (bi->bt) {
+	case vmw_ctx_binding_rt:
+		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+			DRM_ERROR("Illegal render target type %u.\n",
+				  (unsigned) bi->i1.rt_type);
+			return -EINVAL;
+		}
+		loc = &cbs->render_targets[bi->i1.rt_type];
+		break;
+	case vmw_ctx_binding_tex:
+		if (unlikely((unsigned)bi->i1.texture_stage >=
+			     SVGA3D_NUM_TEXTURE_UNITS)) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) bi->i1.texture_stage);
+			return -EINVAL;
+		}
+		loc = &cbs->texture_units[bi->i1.texture_stage];
+		break;
+	case vmw_ctx_binding_shader:
+		if (unlikely((unsigned)bi->i1.shader_type >=
+			     SVGA3D_SHADERTYPE_MAX)) {
+			DRM_ERROR("Illegal shader type %u.\n",
+				  (unsigned) bi->i1.shader_type);
+			return -EINVAL;
+		}
+		loc = &cbs->shaders[bi->i1.shader_type];
+		break;
+	default:
+		BUG();
+	}
+
+	if (loc->bi.ctx != NULL)
+		vmw_context_binding_drop(loc);
+
+	loc->bi = *bi;
+	list_add_tail(&loc->ctx_list, &cbs->list);
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) {
+		vmw_context_binding_kill(entry);
+	}
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 76751e9..a962e4c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -245,6 +245,67 @@ struct vmw_piter {
 	struct page *(*page)(struct vmw_piter *);
 };
 
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @bt: The binding type.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+	struct vmw_resource *ctx;
+	enum vmw_ctx_binding_type bt;
+	union {
+		SVGA3dShaderType shader_type;
+		SVGA3dRenderTargetType rt_type;
+		uint32 texture_stage;
+	} i1;
+};
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ *                        - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+	struct list_head ctx_list;
+	struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct list_head list;
+	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
+
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
@@ -266,6 +327,7 @@ struct vmw_sw_context{
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
+	struct vmw_ctx_binding_state staged_bindings;
 };
 
 struct vmw_legacy_display;
@@ -876,6 +938,9 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *ci);
+extern void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 
 /*
  * Surface management - vmwgfx_surface.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dd5a9a2..8eb87d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -54,6 +54,8 @@ struct vmw_resource_relocation {
  * @res: Ref-counted pointer to the resource.
  * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  * @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
@@ -65,6 +67,7 @@ struct vmw_resource_val_node {
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
 	struct vmw_dma_buffer *new_backup;
+	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	bool first_usage;
 	bool no_buffer_needed;
@@ -106,6 +109,11 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 		struct vmw_dma_buffer *new_backup =
 			backoff ? NULL : val->new_backup;
 
+		if (unlikely(val->staged_bindings)) {
+			vmw_context_binding_state_kill(val->staged_bindings);
+			kfree(val->staged_bindings);
+			val->staged_bindings = NULL;
+		}
 		vmw_resource_unreserve(res, new_backup,
 			val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
@@ -389,8 +397,15 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID)
+	if (*id == SVGA3D_INVALID_ID) {
+		if (p_val)
+			*p_val = NULL;
+		if (res_type == vmw_res_context) {
+			DRM_ERROR("Illegal context invalid id.\n");
+			return -EINVAL;
+		}
 		return 0;
+	}
 
 	/*
 	 * Fastpath in case of repeated commands referencing the same
@@ -438,6 +453,18 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	rcache->node = node;
 	if (p_val)
 		*p_val = node;
+
+	if (node->first_usage && res_type == vmw_res_context) {
+		node->staged_bindings =
+			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+		if (node->staged_bindings == NULL) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			goto out_no_reloc;
+		}
+		INIT_LIST_HEAD(&node->staged_bindings->list);
+	}
+
 	vmw_resource_unreference(&res);
 	return 0;
 
@@ -480,17 +507,33 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
-	cmd = container_of(header, struct vmw_sid_cmd, header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
 				&cmd->body.target.sid, NULL);
-	return ret;
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+
+		bi.ctx = ctx_node->res;
+		bi.bt = vmw_ctx_binding_rt;
+		bi.i1.rt_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
+
+	return 0;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -1145,15 +1188,21 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 	struct vmw_tex_state_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetTextureState state;
-	};
+	} *cmd;
 
 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
 	  ((unsigned long) header + header->size + sizeof(header));
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_tex_state_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->state.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1166,6 +1215,16 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 					&cur_state->value, NULL);
 		if (unlikely(ret != 0))
 			return ret;
+
+		if (dev_priv->has_mob) {
+			struct vmw_ctx_bindinfo bi;
+
+			bi.ctx = ctx_node->res;
+			bi.bt = vmw_ctx_binding_tex;
+			bi.i1.texture_stage = cur_state->stage;
+			vmw_context_binding_add(ctx_node->staged_bindings,
+						&bi);
+		}
 	}
 
 	return 0;
@@ -1426,20 +1485,32 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
 			   header);
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+					user_shader_converter,
+					&cmd->body.shid, NULL);
+		if (unlikely(ret != 0))
+			return ret;
 
-	if (dev_priv->has_mob)
-		return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-					 user_shader_converter,
-					 &cmd->body.shid, NULL);
+		bi.ctx = ctx_node->res;
+		bi.bt = vmw_ctx_binding_shader;
+		bi.i1.shader_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
 
 	return 0;
 }
@@ -1820,6 +1891,8 @@ static void vmw_resource_list_unreference(struct list_head *list)
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
+		if (unlikely(val->staged_bindings))
+			kfree(val->staged_bindings);
 		kfree(val);
 	}
 }
-- 
1.8.3.2


More information about the dri-devel mailing list