[Mesa-dev] [PATCH] gallium/radeon: Rename gart_page_size member to bo_alignment

Michel Dänzer michel at daenzer.net
Wed May 11 06:22:27 UTC 2016


From: Michel Dänzer <michel.daenzer at amd.com>

This should make the member's meaning clearer.

The value it holds is the CPU page size, which can be different from
the GART page size.

Signed-off-by: Michel Dänzer <michel.daenzer at amd.com>
---
 src/gallium/drivers/r300/r300_query.c             |  4 +--
 src/gallium/drivers/radeon/r600_pipe_common.c     |  4 +--
 src/gallium/drivers/radeon/r600_query.c           |  2 +-
 src/gallium/drivers/radeon/radeon_winsys.h        |  2 +-
 src/gallium/drivers/radeonsi/si_debug.c           |  8 +++---
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c         | 24 +++++++++---------
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c         |  6 ++---
 src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c     |  2 +-
 src/gallium/winsys/radeon/drm/radeon_drm_bo.c     | 30 +++++++++++------------
 src/gallium/winsys/radeon/drm/radeon_drm_winsys.c |  4 +--
 10 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/src/gallium/drivers/r300/r300_query.c b/src/gallium/drivers/r300/r300_query.c
index 79e2198..a7a90f7 100644
--- a/src/gallium/drivers/r300/r300_query.c
+++ b/src/gallium/drivers/r300/r300_query.c
@@ -59,8 +59,8 @@ static struct pipe_query *r300_create_query(struct pipe_context *pipe,
         q->num_pipes = r300screen->info.r300_num_gb_pipes;
 
     q->buf = r300->rws->buffer_create(r300->rws,
-                                      r300screen->info.gart_page_size,
-                                      r300screen->info.gart_page_size,
+                                      r300screen->info.bo_alignment,
+                                      r300screen->info.bo_alignment,
                                       RADEON_DOMAIN_GTT, 0);
     if (!q->buf) {
         FREE(q);
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index a5966d9..2a9f064 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -369,7 +369,7 @@ bool r600_common_context_init(struct r600_common_context *rctx,
 	cayman_init_msaa(&rctx->b);
 
 	rctx->allocator_so_filled_size =
-		u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
+		u_suballocator_create(&rctx->b, rscreen->info.bo_alignment,
 				      4, 0, PIPE_USAGE_DEFAULT, TRUE);
 	if (!rctx->allocator_so_filled_size)
 		return false;
@@ -929,7 +929,7 @@ struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
 
 	if (templ->target == PIPE_BUFFER) {
 		return r600_buffer_create(screen, templ,
-					  rscreen->info.gart_page_size);
+					  rscreen->info.bo_alignment);
 	} else {
 		return r600_texture_create(screen, templ);
 	}
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index 9f12ccd..c1b6207 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -263,7 +263,7 @@ static struct r600_resource *r600_new_query_buffer(struct r600_common_context *c
 						   struct r600_query_hw *query)
 {
 	unsigned buf_size = MAX2(query->result_size,
-				 ctx->screen->info.gart_page_size);
+				 ctx->screen->info.bo_alignment);
 
 	/* Queries are normally read by the CPU after
 	 * being written by the gpu, hence staging is probably a good
diff --git a/src/gallium/drivers/radeon/radeon_winsys.h b/src/gallium/drivers/radeon/radeon_winsys.h
index e73fa14..385e386 100644
--- a/src/gallium/drivers/radeon/radeon_winsys.h
+++ b/src/gallium/drivers/radeon/radeon_winsys.h
@@ -242,7 +242,7 @@ struct radeon_info {
     uint32_t                    pci_id;
     enum radeon_family          family;
     enum chip_class             chip_class;
-    uint32_t                    gart_page_size;
+    uint32_t                    bo_alignment; /* BO address & size alignment */
     uint64_t                    gart_size;
     uint64_t                    vram_size;
     bool                        has_dedicated_vram;
diff --git a/src/gallium/drivers/radeonsi/si_debug.c b/src/gallium/drivers/radeonsi/si_debug.c
index f227e5a..dada502 100644
--- a/src/gallium/drivers/radeonsi/si_debug.c
+++ b/src/gallium/drivers/radeonsi/si_debug.c
@@ -597,8 +597,8 @@ static void si_dump_last_bo_list(struct si_context *sctx, FILE *f)
 		"VM end page           Usage" COLOR_RESET "\n");
 
 	for (i = 0; i < sctx->last_bo_count; i++) {
-		/* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
-		const unsigned page_size = sctx->b.screen->info.gart_page_size;
+		/* Note: Buffer sizes are expected to be aligned by the winsys. */
+		const unsigned alignment = sctx->b.screen->info.bo_alignment;
 		uint64_t va = sctx->last_bo_list[i].vm_address;
 		uint64_t size = sctx->last_bo_list[i].buf->size;
 		bool hit = false;
@@ -610,13 +610,13 @@ static void si_dump_last_bo_list(struct si_context *sctx, FILE *f)
 
 			if (va > previous_va_end) {
 				fprintf(f, "  %10"PRIu64"    -- hole --\n",
-					(va - previous_va_end) / page_size);
+					(va - previous_va_end) / alignment);
 			}
 		}
 
 		/* Print the buffer. */
 		fprintf(f, "  %10"PRIu64"    0x%013"PRIx64"       0x%013"PRIx64"       ",
-			size / page_size, va / page_size, (va + size) / page_size);
+			size / alignment, va / alignment, (va + size) / alignment);
 
 		/* Print the usage. */
 		for (j = 0; j < 64; j++) {
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index ed12f30..b5c607c 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -137,9 +137,9 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
       amdgpu_fence_reference(&bo->fence[i], NULL);
 
    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
+      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.bo_alignment);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
-      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
+      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.bo_alignment);
    FREE(bo);
 }
 
@@ -327,9 +327,9 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
    bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
 
    if (initial_domain & RADEON_DOMAIN_VRAM)
-      ws->allocated_vram += align64(size, ws->info.gart_page_size);
+      ws->allocated_vram += align64(size, ws->info.bo_alignment);
    else if (initial_domain & RADEON_DOMAIN_GTT)
-      ws->allocated_gtt += align64(size, ws->info.gart_page_size);
+      ws->allocated_gtt += align64(size, ws->info.bo_alignment);
 
    amdgpu_add_buffer_to_global_list(bo);
 
@@ -465,12 +465,12 @@ amdgpu_bo_create(struct radeon_winsys *rws,
    struct amdgpu_winsys_bo *bo;
    unsigned usage = 0;
 
-   /* Align size to page size. This is the minimum alignment for normal
-    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
-    * like constant/uniform buffers, can benefit from better and more reuse.
+   /* This is the minimum address & size alignment for normal BOs. Aligning
+    * these here helps the cached bufmgr. Especially small BOs, like
+    * constant/uniform buffers, can benefit from better and more reuse.
     */
-   size = align64(size, ws->info.gart_page_size);
-   alignment = align(alignment, ws->info.gart_page_size);
+   size = align64(size, ws->info.bo_alignment);
+   alignment = align(alignment, ws->info.bo_alignment);
 
    /* Only set one usage bit each for domains and flags, or the cache manager
     * might consider different sets of domains / flags compatible
@@ -577,9 +577,9 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
       *offset = whandle->offset;
 
    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
+      ws->allocated_vram += align64(bo->base.size, ws->info.bo_alignment);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
-      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
+      ws->allocated_gtt += align64(bo->base.size, ws->info.bo_alignment);
 
    amdgpu_add_buffer_to_global_list(bo);
 
@@ -669,7 +669,7 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
     bo->initial_domain = RADEON_DOMAIN_GTT;
     bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
 
-    ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
+    ws->allocated_gtt += align64(bo->base.size, ws->info.bo_alignment);
 
     amdgpu_add_buffer_to_global_list(bo);
 
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index a5d7033..e42a55a 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -138,8 +138,8 @@ static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
       return NULL;
    }
 
-   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
-   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
+   alloc_buffer.alloc_size = ctx->ws->info.bo_alignment;
+   alloc_buffer.phys_alignment = ctx->ws->info.bo_alignment;
    alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 
    r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
@@ -237,7 +237,7 @@ static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
       ib->used_ib_space = 0;
 
       ib->big_ib_buffer = ws->buffer_create(ws, buffer_size,
-                                            aws->info.gart_page_size,
+                                            aws->info.bo_alignment,
                                             RADEON_DOMAIN_GTT,
                                             RADEON_FLAG_CPU_ACCESS);
       if (!ib->big_ib_buffer)
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index ad4f21f..713427bb 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -292,7 +292,7 @@ static boolean do_winsys_init(struct amdgpu_winsys *ws, int fd)
    memcpy(ws->info.cik_macrotile_mode_array, ws->amdinfo.gb_macro_tile_mode,
           sizeof(ws->amdinfo.gb_macro_tile_mode));
 
-   ws->info.gart_page_size = alignment_info.size_remote;
+   ws->info.bo_alignment = alignment_info.size_remote;
 
    return TRUE;
 
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index b07afe6..2054f16 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -144,7 +144,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
     /* All VM address space holes will implicitly start aligned to the
      * size alignment, so we don't need to sanitize the alignment here
      */
-    size = align(size, rws->info.gart_page_size);
+    size = align(size, rws->info.bo_alignment);
 
     pipe_mutex_lock(rws->bo_va_mutex);
     /* first look for a hole */
@@ -202,7 +202,7 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
 {
     struct radeon_bo_va_hole *hole;
 
-    size = align(size, rws->info.gart_page_size);
+    size = align(size, rws->info.bo_alignment);
 
     pipe_mutex_lock(rws->bo_va_mutex);
     if ((va + size) == rws->va_offset) {
@@ -313,9 +313,9 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
     pipe_mutex_destroy(bo->map_mutex);
 
     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-        rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
+        rws->allocated_vram -= align(bo->base.size, rws->info.bo_alignment);
     else if (bo->initial_domain & RADEON_DOMAIN_GTT)
-        rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);
+        rws->allocated_gtt -= align(bo->base.size, rws->info.bo_alignment);
     FREE(bo);
 }
 
@@ -591,9 +591,9 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
     }
 
     if (initial_domains & RADEON_DOMAIN_VRAM)
-        rws->allocated_vram += align(size, rws->info.gart_page_size);
+        rws->allocated_vram += align(size, rws->info.bo_alignment);
     else if (initial_domains & RADEON_DOMAIN_GTT)
-        rws->allocated_gtt += align(size, rws->info.gart_page_size);
+        rws->allocated_gtt += align(size, rws->info.bo_alignment);
 
     return bo;
 }
@@ -727,12 +727,12 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
     if (size > UINT_MAX)
         return NULL;
 
-    /* Align size to page size. This is the minimum alignment for normal
-     * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
-     * like constant/uniform buffers, can benefit from better and more reuse.
+    /* This is the minimum address & size alignment for normal BOs. Aligning
+     * these here helps the cached bufmgr. Especially small BOs, like
+     * constant/uniform buffers, can benefit from better and more reuse.
      */
-    size = align(size, ws->info.gart_page_size);
-    alignment = align(alignment, ws->info.gart_page_size);
+    size = align(size, ws->info.bo_alignment);
+    alignment = align(alignment, ws->info.bo_alignment);
 
     /* Only set one usage bit each for domains and flags, or the cache manager
      * might consider different sets of domains / flags compatible
@@ -780,7 +780,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
 
     memset(&args, 0, sizeof(args));
     args.addr = (uintptr_t)pointer;
-    args.size = align(size, ws->info.gart_page_size);
+    args.size = align(size, ws->info.bo_alignment);
     args.flags = RADEON_GEM_USERPTR_ANONONLY |
         RADEON_GEM_USERPTR_VALIDATE |
         RADEON_GEM_USERPTR_REGISTER;
@@ -843,7 +843,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
         pipe_mutex_unlock(ws->bo_handles_mutex);
     }
 
-    ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
+    ws->allocated_gtt += align(bo->base.size, ws->info.bo_alignment);
 
     return (struct pb_buffer*)bo;
 }
@@ -981,9 +981,9 @@ done:
     bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
 
     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-        ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
+        ws->allocated_vram += align(bo->base.size, ws->info.bo_alignment);
     else if (bo->initial_domain & RADEON_DOMAIN_GTT)
-        ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
+        ws->allocated_gtt += align(bo->base.size, ws->info.bo_alignment);
 
     return (struct pb_buffer*)bo;
 
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index 5c85c8f..2d005c8 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -833,8 +833,8 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
     ws->va_offset = ws->va_start;
     list_inithead(&ws->va_holes);
 
-    /* TTM aligns the BO size to the CPU page size */
-    ws->info.gart_page_size = sysconf(_SC_PAGESIZE);
+    /* TTM aligns the BO address & size to the CPU page size */
+    ws->info.bo_alignment = sysconf(_SC_PAGESIZE);
 
     ws->ncs = 0;
     pipe_semaphore_init(&ws->cs_queued, 0);
-- 
2.8.1



More information about the mesa-dev mailing list