[Mesa-dev] [PATCH 2/2] radeonsi: always place sparse buffers in VRAM

Dieter Nützel Dieter at nuetzel-hh.de
Wed Nov 29 01:57:39 UTC 2017


For the series:

Tested-by: Dieter Nützel <Dieter at nuetzel-hh.de>

on RX580

with UH, UV, DiRT Rally, some Wine apps - LS2015/LS2017

Dieter

Am 28.11.2017 14:44, schrieb Nicolai Hähnle:
> From: Nicolai Hähnle <nicolai.haehnle at amd.com>
> 
> Together with "radeonsi: fix the R600_RESOURCE_FLAG_UNMAPPABLE check",
> this ensures that sparse buffers are placed in VRAM.
> 
> Noticed by an assertion that started triggering with commit d4fac1e1d7
> ("gallium/radeon: enable suballocations for VRAM with no CPU access").
> 
> Fixes KHR-GL45.sparse_buffer_tests.BufferStorageTest in debug builds.
> ---
>  src/gallium/drivers/radeon/r600_buffer_common.c | 3 +++
>  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c       | 5 +++--
>  2 files changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c
> b/src/gallium/drivers/radeon/r600_buffer_common.c
> index 158cabcfaeb..36a29181edc 100644
> --- a/src/gallium/drivers/radeon/r600_buffer_common.c
> +++ b/src/gallium/drivers/radeon/r600_buffer_common.c
> @@ -616,20 +616,23 @@ r600_alloc_buffer_struct(struct pipe_screen 
> *screen,
>  	return rbuffer;
>  }
> 
>  struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
>  				       const struct pipe_resource *templ,
>  				       unsigned alignment)
>  {
>  	struct r600_common_screen *rscreen = (struct 
> r600_common_screen*)screen;
>  	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, 
> templ);
> 
> +	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
> +		rbuffer->b.b.flags |= R600_RESOURCE_FLAG_UNMAPPABLE;
> +
>  	si_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);
> 
>  	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
>  		rbuffer->flags |= RADEON_FLAG_SPARSE;
> 
>  	if (!si_alloc_resource(rscreen, rbuffer)) {
>  		FREE(rbuffer);
>  		return NULL;
>  	}
>  	return &rbuffer->b.b;
> diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
> b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
> index c3e97c22861..d6d5805deed 100644
> --- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
> +++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
> @@ -1143,20 +1143,23 @@ amdgpu_bo_create(struct radeon_winsys *rws,
>     struct amdgpu_winsys *ws = amdgpu_winsys(rws);
>     struct amdgpu_winsys_bo *bo;
>     unsigned usage = 0, pb_cache_bucket = 0;
> 
>     /* VRAM implies WC. This is not optional. */
>     assert(!(domain & RADEON_DOMAIN_VRAM) || flags & 
> RADEON_FLAG_GTT_WC);
> 
>     /* NO_CPU_ACCESS is valid with VRAM only. */
>     assert(domain == RADEON_DOMAIN_VRAM || !(flags &
> RADEON_FLAG_NO_CPU_ACCESS));
> 
> +   /* Sparse buffers must have NO_CPU_ACCESS set. */
> +   assert(!(flags & RADEON_FLAG_SPARSE) || flags & 
> RADEON_FLAG_NO_CPU_ACCESS);
> +
>     /* Sub-allocate small buffers from slabs. */
>     if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
>         size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
>         alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2,
> util_next_power_of_two(size))) {
>        struct pb_slab_entry *entry;
>        int heap = radeon_get_heap_index(domain, flags);
> 
>        if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
>           goto no_slab;
> 
> @@ -1175,22 +1178,20 @@ amdgpu_bo_create(struct radeon_winsys *rws,
> 
>        pipe_reference_init(&bo->base.reference, 1);
> 
>        return &bo->base;
>     }
>  no_slab:
> 
>     if (flags & RADEON_FLAG_SPARSE) {
>        assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
> 
> -      flags |= RADEON_FLAG_NO_CPU_ACCESS;
> -
>        return amdgpu_bo_sparse_create(ws, size, domain, flags);
>     }
> 
>     /* This flag is irrelevant for the cache. */
>     flags &= ~RADEON_FLAG_NO_SUBALLOC;
> 
>     /* Align size to page size. This is the minimum alignment for 
> normal
>      * BOs. Aligning this here helps the cached bufmgr. Especially 
> small BOs,
>      * like constant/uniform buffers, can benefit from better and more 
> reuse.
>      */


More information about the mesa-dev mailing list