[PATCH 1/4] drm/vmwgfx: remove use of fence_obj_args

Thomas Hellstrom <thomas at shipmail.org>
Thu Oct 25 00:55:57 PDT 2012


On 10/12/2012 05:01 PM, Maarten Lankhorst wrote:
> It's always hardcoded to the same value.
>
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>
> ---
>   drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c   |    7 ++-----
>   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |   15 ++-------------
>   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |    4 ----
>   3 files changed, 4 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> index 9826fbc..da12922 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> @@ -318,19 +318,16 @@ static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
>   
>   static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
>   {
> -	unsigned long flags = (unsigned long) sync_arg;
>   	return	vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
> -				       (uint32_t) flags);
> +				       DRM_VMW_FENCE_FLAG_EXEC);
>   
>   }
>   
>   static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
>   			     bool lazy, bool interruptible)
>   {
> -	unsigned long flags = (unsigned long) sync_arg;
> -
>   	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
> -				  (uint32_t) flags,
> +				  DRM_VMW_FENCE_FLAG_EXEC,
>   				  lazy, interruptible,
>   				  VMW_FENCE_WAIT_TIMEOUT);
>   }
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> index 30654b4..e5775a0 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> @@ -71,7 +71,6 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
>    */
>   static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
>   				   struct ttm_buffer_object *bo,
> -				   uint32_t fence_flags,
>   				   uint32_t *p_val_node)
>   {
>   	uint32_t val_node;
> @@ -87,15 +86,12 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
>   
>   	val_buf = &sw_context->val_bufs[val_node];
>   	if (unlikely(val_node == sw_context->cur_val_buf)) {
> -		val_buf->new_sync_obj_arg = NULL;
>   		val_buf->bo = ttm_bo_reference(bo);
>   		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
>   		++sw_context->cur_val_buf;
>   	}
>   
> -	val_buf->new_sync_obj_arg = (void *)
> -		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
> -	sw_context->fence_flags |= fence_flags;
> +	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
>   
>   	if (p_val_node)
>   		*p_val_node = val_node;
> @@ -313,7 +309,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
>   			cid_to_add = sw_context->cur_query_cid;
>   			ret = vmw_bo_to_validate_list(sw_context,
>   						      sw_context->cur_query_bo,
> -						      DRM_VMW_FENCE_FLAG_EXEC,
>   						      NULL);
>   			if (unlikely(ret != 0))
>   				return ret;
> @@ -322,7 +317,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
>   
>   		ret = vmw_bo_to_validate_list(sw_context,
>   					      dev_priv->dummy_query_bo,
> -					      DRM_VMW_FENCE_FLAG_EXEC,
>   					      NULL);
>   		if (unlikely(ret != 0))
>   			return ret;
> @@ -346,7 +340,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
>   				      &sw_context->query_list);
>   		ret = vmw_bo_to_validate_list(sw_context,
>   					      dev_priv->dummy_query_bo,
> -					      DRM_VMW_FENCE_FLAG_EXEC,
>   					      NULL);
>   		if (unlikely(ret != 0))
>   			return ret;
> @@ -465,8 +458,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
>   	reloc = &sw_context->relocs[sw_context->cur_reloc++];
>   	reloc->location = ptr;
>   
> -	ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
> -				      &reloc->index);
> +	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
>   	if (unlikely(ret != 0))
>   		goto out_no_reloc;
>   
> @@ -1290,12 +1282,9 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
>   
>   	INIT_LIST_HEAD(&validate_list);
>   
> -	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
> -		DRM_VMW_FENCE_FLAG_EXEC;
>   	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
>   	list_add_tail(&pinned_val.head, &validate_list);
>   
> -	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
>   	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
>   	list_add_tail(&query_val.head, &validate_list);
>   
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index da3c6b5..0ca3e1c 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -833,8 +833,6 @@ int vmw_surface_do_validate(struct vmw_private *dev_priv,
>   	if (srf->backup) {
>   		INIT_LIST_HEAD(&val_list);
>   		val_buf.bo = ttm_bo_reference(srf->backup);
> -		val_buf.new_sync_obj_arg = (void *)((unsigned long)
> -						    DRM_VMW_FENCE_FLAG_EXEC);
>   		list_add_tail(&val_buf.head, &val_list);
>   		ret = ttm_eu_reserve_buffers(&val_list);
>   		if (unlikely(ret != 0))
> @@ -966,8 +964,6 @@ int vmw_surface_evict(struct vmw_private *dev_priv,
>   
>   	INIT_LIST_HEAD(&val_list);
>   	val_buf.bo = ttm_bo_reference(srf->backup);
> -	val_buf.new_sync_obj_arg = (void *)(unsigned long)
> -		DRM_VMW_FENCE_FLAG_EXEC;
>   	list_add_tail(&val_buf.head, &val_list);
>   	ret = ttm_eu_reserve_buffers(&val_list);
>   	if (unlikely(ret != 0))
>
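A quick note for the archives: with the sync_arg plumbing gone, the TTM
sync-obj callbacks in vmwgfx_buffer.c reduce to roughly the following.
This is a sketch pieced together from the hunks above, not verbatim file
contents; all identifiers come from the patch itself.

static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	/* Flags are now hardcoded; sync_arg is ignored. */
	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
			     bool lazy, bool interruptible)
{
	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  DRM_VMW_FENCE_FLAG_EXEC,
				  lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}

/*
 * Callers of vmw_bo_to_validate_list() likewise lose the flags argument:
 *	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
 */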
For the series:
Reviewed-by: Thomas Hellstrom <thellstrom at vmware.com>
