[Mesa-dev] [PATCH 1/9] r600g/compute: Add an intermediate resource for OpenCL buffers

Tom Stellard tom at stellard.net
Mon Jun 16 06:45:39 PDT 2014


On Fri, Jun 13, 2014 at 10:35:30PM +0200, Bruno Jiménez wrote:
> This patch completely changes the way buffers are added to the
> compute_memory_pool. Before this, whenever we were going to
> map a buffer or write to or read from it, it would get placed
> into the pool. Now, every unallocated buffer has its own
> r600_resource until it is allocated in the pool.
> 
> NOTE: This patch also increases the GPU memory usage at the moment
> of putting every buffer in its place. Roughly, the memory usage
> is ~2x (the sum of every buffer size).
> 
> v2: Cleanup
> ---
>  src/gallium/drivers/r600/compute_memory_pool.c | 21 ++++++++++++++++++++-
>  src/gallium/drivers/r600/compute_memory_pool.h |  2 ++
>  src/gallium/drivers/r600/evergreen_compute.c   | 18 +++++++++++++-----
>  3 files changed, 35 insertions(+), 6 deletions(-)
> 
> diff --git a/src/gallium/drivers/r600/compute_memory_pool.c b/src/gallium/drivers/r600/compute_memory_pool.c
> index ec8c470..94ddcde 100644
> --- a/src/gallium/drivers/r600/compute_memory_pool.c
> +++ b/src/gallium/drivers/r600/compute_memory_pool.c
> @@ -71,7 +71,6 @@ static void compute_memory_pool_init(struct compute_memory_pool * pool,
>  	if (pool->shadow == NULL)
>  		return;
>  
> -	pool->next_id = 1;
>  	pool->size_in_dw = initial_size_in_dw;
>  	pool->bo = (struct r600_resource*)r600_compute_buffer_alloc_vram(pool->screen,
>  							pool->size_in_dw * 4);
> @@ -365,6 +364,18 @@ int compute_memory_finalize_pending(struct compute_memory_pool* pool,
>  			pool->item_list = item;
>  		}
>  
> +		((struct r600_context *)pipe)->b.b.resource_copy_region(pipe,
> +				(struct pipe_resource *)pool->bo,
> +				0, item->start_in_dw * 4, 0 ,0,
> +				(struct pipe_resource *)item->real_buffer,
> +				0, &(struct pipe_box) {.width = item->size_in_dw * 4,
> +				.height = 1, .depth = 1});
> +
> +		pool->screen->b.b.resource_destroy(
> +			(struct pipe_screen *)pool->screen,
> +			(struct pipe_resource *)item->real_buffer);

You should use temporary variables rather than inlining the casts everywhere.
It will make the code easier to read. Make sure to declare them at the
beginning of the function or basic block.
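
For illustration, a rough (untested) sketch of what that could look like in
compute_memory_finalize_pending(), with the locals hoisted to the top of the
loop body (variable names here are just suggestions, not from the patch):

	struct r600_context *rctx = (struct r600_context *)pipe;
	struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
	struct pipe_resource *dst = (struct pipe_resource *)pool->bo;
	struct pipe_resource *src = (struct pipe_resource *)item->real_buffer;
	struct pipe_box box;

	/* Copy the item from its intermediate buffer into the pool bo. */
	box.x = 0;
	box.y = 0;
	box.z = 0;
	box.width = item->size_in_dw * 4;
	box.height = 1;
	box.depth = 1;

	rctx->b.b.resource_copy_region(pipe, dst, 0, item->start_in_dw * 4,
			0, 0, src, 0, &box);

	/* The intermediate buffer is no longer needed. */
	screen->resource_destroy(screen, src);
	item->real_buffer = NULL;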

> +		item->real_buffer = NULL;
> +
>  		allocated += item->size_in_dw;
>  	}
>  
> @@ -393,6 +404,12 @@ void compute_memory_free(struct compute_memory_pool* pool, int64_t id)
>  				item->next->prev = item->prev;
>  			}
>  
> +			if (item->real_buffer) {
> +				pool->screen->b.b.resource_destroy(
> +						(struct pipe_screen *)pool->screen,
> +						(struct pipe_resource *)item->real_buffer);
> +			}
> +

Same thing here with the casts.
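
Something like this (again untested, names just illustrative):

	struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
	struct pipe_resource *res = (struct pipe_resource *)item->real_buffer;

	/* Destroy the intermediate buffer if the item was never
	 * placed into the pool. */
	if (res)
		screen->resource_destroy(screen, res);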

>  			free(item);
>  
>  			return;
> @@ -426,6 +443,8 @@ struct compute_memory_item* compute_memory_alloc(
>  	new_item->start_in_dw = -1; /* mark pending */
>  	new_item->id = pool->next_id++;
>  	new_item->pool = pool;
> +	new_item->real_buffer = (struct r600_resource*)r600_compute_buffer_alloc_vram(
> +							pool->screen, size_in_dw * 4);
>  
>  	if (pool->item_list) {
>  		for (last_item = pool->item_list; last_item->next;
> diff --git a/src/gallium/drivers/r600/compute_memory_pool.h b/src/gallium/drivers/r600/compute_memory_pool.h
> index c711c59..e94159c 100644
> --- a/src/gallium/drivers/r600/compute_memory_pool.h
> +++ b/src/gallium/drivers/r600/compute_memory_pool.h
> @@ -38,6 +38,8 @@ struct compute_memory_item
>  	int64_t start_in_dw; ///Start pointer in dwords relative in the pool bo
>  	int64_t size_in_dw; ///Size of the chunk in dwords
>  
> +	struct r600_resource *real_buffer;
> +
>  	struct compute_memory_pool* pool;
>  
>  	struct compute_memory_item* prev;
> diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
> index a2abf15..c152e54 100644
> --- a/src/gallium/drivers/r600/evergreen_compute.c
> +++ b/src/gallium/drivers/r600/evergreen_compute.c
> @@ -958,6 +958,17 @@ void *r600_compute_global_transfer_map(
>  	struct r600_resource_global* buffer =
>  		(struct r600_resource_global*)resource;
>  
> +	struct pipe_resource *dst;
> +	unsigned offset = box->x;
> +
> +	if (buffer->chunk->real_buffer) {
> +		dst = (struct pipe_resource*)buffer->chunk->real_buffer;
> +	}
> +	else {
> +		dst = (struct pipe_resource*)buffer->chunk->pool->bo;
> +		offset += (buffer->chunk->start_in_dw * 4);
> +	}
> +
>  	COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
>  			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
>  			"width = %u, height = %u, depth = %u)\n", level, usage,
> @@ -967,8 +978,6 @@ void *r600_compute_global_transfer_map(
>  		"%u (box.x)\n", buffer->chunk->id, box->x);
>  
>  
> -	compute_memory_finalize_pending(pool, ctx_);
> -
>  	assert(resource->target == PIPE_BUFFER);
>  	assert(resource->bind & PIPE_BIND_GLOBAL);
>  	assert(box->x >= 0);
> @@ -976,9 +985,8 @@ void *r600_compute_global_transfer_map(
>  	assert(box->z == 0);
>  
>  	///TODO: do it better, mapping is not possible if the pool is too big
> -	return pipe_buffer_map_range(ctx_, (struct pipe_resource*)buffer->chunk->pool->bo,
> -			box->x + (buffer->chunk->start_in_dw * 4),
> -			box->width, usage, ptransfer);
> +	return pipe_buffer_map_range(ctx_, dst,
> +			offset, box->width, usage, ptransfer);
>  }
>  
>  void r600_compute_global_transfer_unmap(
> -- 
> 2.0.0
> 
> _______________________________________________
> mesa-dev mailing list
> mesa-dev at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/mesa-dev

