[PATCH v2 4/4] drm/panthor: Fix an off-by-one in the heap context retrieval logic

Steven Price steven.price at arm.com
Thu May 2 14:03:51 UTC 2024


On 30/04/2024 12:28, Boris Brezillon wrote:
> ID 0 is reserved to encode 'no-tiler-heap', so the heap ID range is
> [1:MAX_HEAPS_PER_POOL], which we occasionally need to turn into an index
> in the [0:MAX_HEAPS_PER_POOL-1] range when we want to access the context
> object.
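
If I'm reading the fix right, the conversion in question boils down to
the following (a minimal standalone sketch, not the driver code; the
stride of 4096 and the MAX_HEAPS_PER_POOL value are made up here):

#include <stdint.h>
#include <stdio.h>

#define MAX_HEAPS_PER_POOL 128	/* placeholder, not the driver's value */

/* Heap IDs live in [1:MAX_HEAPS_PER_POOL] and 0 means "no tiler heap",
 * so the context array index (and hence the offset) must use (id - 1).
 */
static uint32_t heap_ctx_offset(uint32_t stride, int id)
{
	return stride * (uint32_t)(id - 1);
}

int main(void)
{
	printf("id=1 -> offset %u\n", heap_ctx_offset(4096, 1));	/* 0 */
	printf("id=2 -> offset %u\n", heap_ctx_offset(4096, 2));	/* 4096 */
	return 0;
}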

This might be a silly question, but do we need ID 0 to be
"no-tiler-heap"? Would it be easier to use, e.g., a negative number for
that situation and avoid all the off-by-one problems?
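
Roughly what I had in mind, as a standalone sketch rather than a
concrete proposal (heap_idx_from_offset is a hypothetical helper, and
the pool size and stride are again made-up values):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HEAPS_PER_POOL 128	/* placeholder, not the driver's value */

/* Hypothetical lookup: indices stay 0-based everywhere and "no tiler
 * heap" is reported as a negative errno, so no +1/-1 conversions are
 * needed at the callers.
 */
static int heap_idx_from_offset(uint64_t offset, uint32_t stride)
{
	uint64_t idx = offset / stride;

	if (idx >= MAX_HEAPS_PER_POOL)
		return -EINVAL;

	return (int)idx;	/* directly usable as a context index */
}

int main(void)
{
	printf("offset 0 -> %d\n", heap_idx_from_offset(0, 4096));		/* 0 */
	printf("huge offset -> %d\n", heap_idx_from_offset(1ULL << 40, 4096));	/* -EINVAL */
	return 0;
}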

I'm struggling to find the code that needs the 0 value to be special.
Where exactly do we encode this "no-tiler-heap" value?

Steve

> 
> v2:
> - New patch
> 
> Fixes: 9cca48fa4f89 ("drm/panthor: Add the heap logical block")
> Reported-by: Eric Smith <eric.smith at collabora.com>
> Signed-off-by: Boris Brezillon <boris.brezillon at collabora.com>
> Tested-by: Eric Smith <eric.smith at collabora.com>
> ---
>  drivers/gpu/drm/panthor/panthor_heap.c | 35 +++++++++++++++++++-------
>  1 file changed, 26 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
> index 683bb94761bc..b1a7dbf25fb2 100644
> --- a/drivers/gpu/drm/panthor/panthor_heap.c
> +++ b/drivers/gpu/drm/panthor/panthor_heap.c
> @@ -109,7 +109,11 @@ static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
>  
>  static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id)
>  {
> -	return panthor_heap_ctx_stride(pool->ptdev) * id;
> +	/* ID 0 is reserved to encode 'no-tiler-heap', so the valid range
> +	 * is [1:MAX_HEAPS_PER_POOL], which we need to turn into a
> +	 * [0:MAX_HEAPS_PER_POOL-1] context index, hence the minus one here.
> +	 */
> +	return panthor_heap_ctx_stride(pool->ptdev) * (id - 1);
>  }
>  
>  static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
> @@ -118,6 +122,21 @@ static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
>  	       panthor_get_heap_ctx_offset(pool, id);
>  }
>  
> +static int panthor_get_heap_ctx_id(struct panthor_heap_pool *pool,
> +				   u64 heap_ctx_gpu_va)
> +{
> +	u64 offset = heap_ctx_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
> +	u32 heap_idx = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
> +
> +	if (offset > U32_MAX || heap_idx >= MAX_HEAPS_PER_POOL)
> +		return -EINVAL;
> +
> +	/* ID 0 is reserved to encode 'no-tiler-heap', so the valid range
> +	 * is [1:MAX_HEAPS_PER_POOL], hence the plus one here.
> +	 */
> +	return heap_idx + 1;
> +}
> +
>  static void panthor_free_heap_chunk(struct panthor_vm *vm,
>  				    struct panthor_heap *heap,
>  				    struct panthor_heap_chunk *chunk)
> @@ -364,14 +383,13 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
>  			      u64 heap_gpu_va,
>  			      u64 chunk_gpu_va)
>  {
> -	u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
> -	u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
> +	int heap_id = panthor_get_heap_ctx_id(pool, heap_gpu_va);
>  	struct panthor_heap_chunk *chunk, *tmp, *removed = NULL;
>  	struct panthor_heap *heap;
>  	int ret;
>  
> -	if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
> -		return -EINVAL;
> +	if (heap_id < 0)
> +		return heap_id;
>  
>  	down_read(&pool->lock);
>  	heap = xa_load(&pool->xa, heap_id);
> @@ -427,14 +445,13 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
>  		      u32 pending_frag_count,
>  		      u64 *new_chunk_gpu_va)
>  {
> -	u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
> -	u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
> +	int heap_id = panthor_get_heap_ctx_id(pool, heap_gpu_va);
>  	struct panthor_heap_chunk *chunk;
>  	struct panthor_heap *heap;
>  	int ret;
>  
> -	if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
> -		return -EINVAL;
> +	if (heap_id < 0)
> +		return heap_id;
>  
>  	down_read(&pool->lock);
>  	heap = xa_load(&pool->xa, heap_id);


