[Mesa-dev] [PATCH 06/14] winsys/amdgpu: add slab entry structures to amdgpu_winsys_bo
Nicolai Hähnle
nhaehnle at gmail.com
Tue Sep 13 09:56:17 UTC 2016
From: Nicolai Hähnle <nicolai.haehnle at amd.com>
This already adjusts amdgpu_bo_map/unmap accordingly.
---
src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 83 ++++++++++++++++++++-----------
src/gallium/winsys/amdgpu/drm/amdgpu_bo.h | 25 +++++++---
src/gallium/winsys/amdgpu/drm/amdgpu_cs.c | 2 +-
3 files changed, 74 insertions(+), 36 deletions(-)
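[Reviewer note, not part of the commit message: below is a minimal, standalone sketch of the real/slab split this patch introduces and of how amdgpu_bo_map derives a CPU pointer for a slab entry. The sketch_bo/sketch_map names and the hard-coded addresses are invented for illustration only; just the union layout and the va-offset arithmetic mirror the actual amdgpu_winsys_bo changes in the diff.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_bo {
   uint64_t va;                   /* GPU virtual address of this buffer */
   void *cpu;                     /* CPU mapping, only valid for real BOs */
   int is_real;                   /* real allocation vs. slab suballocation */
   union {
      struct {
         int map_count;           /* only the real BO counts mappings */
      } real;
      struct {
         struct sketch_bo *real;  /* backing allocation of a slab entry */
      } slab;
   } u;
};

static void *sketch_map(struct sketch_bo *bo)
{
   /* Slab entries resolve to their backing real BO; the CPU pointer is
    * offset by the distance between the two virtual addresses. */
   struct sketch_bo *real = bo->is_real ? bo : bo->u.slab.real;
   uint64_t offset = bo->va - real->va;

   real->u.real.map_count++;
   return (uint8_t *)real->cpu + offset;
}

int main(void)
{
   static uint8_t backing[4096];
   struct sketch_bo real = { .va = 0x100000, .cpu = backing, .is_real = 1 };
   struct sketch_bo entry = { .va = 0x100200, .is_real = 0 };
   entry.u.slab.real = &real;

   assert(sketch_map(&entry) == backing + 0x200);
   printf("slab entry maps 0x%llx bytes into its backing buffer\n",
          (unsigned long long)(entry.va - real.va));
   return 0;
}

The point of the union is that a suballocated slab entry never owns a kernel BO handle, a VA range handle, a cache entry or a map count of its own; it only records its backing buffer and derives everything else from it, which is exactly what the map/unmap/get_handle paths in the diff do.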
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 37a7ba1..f581d9b 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -144,64 +144,70 @@ static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
FREE(bo->fences);
bo->num_fences = 0;
bo->max_fences = 0;
}
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
+ assert(bo->bo && "must not be called for slab entries");
+
pipe_mutex_lock(bo->ws->global_bo_list_lock);
- LIST_DEL(&bo->global_list_item);
+ LIST_DEL(&bo->u.real.global_list_item);
bo->ws->num_buffers--;
pipe_mutex_unlock(bo->ws->global_bo_list_lock);
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
- amdgpu_va_range_free(bo->va_handle);
+ amdgpu_va_range_free(bo->u.real.va_handle);
amdgpu_bo_free(bo->bo);
amdgpu_bo_remove_fences(bo);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
- if (bo->map_count >= 1) {
+ if (bo->u.real.map_count >= 1) {
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
bo->ws->mapped_vram -= bo->base.size;
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
bo->ws->mapped_gtt -= bo->base.size;
}
FREE(bo);
}
static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
- if (bo->use_reusable_pool)
- pb_cache_add_buffer(&bo->cache_entry);
+ assert(bo->bo); /* slab buffers have a separate vtbl */
+
+ if (bo->u.real.use_reusable_pool)
+ pb_cache_add_buffer(&bo->u.real.cache_entry);
else
amdgpu_bo_destroy(_buf);
}
static void *amdgpu_bo_map(struct pb_buffer *buf,
struct radeon_winsys_cs *rcs,
enum pipe_transfer_usage usage)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
+ struct amdgpu_winsys_bo *real;
struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
int r;
void *cpu = NULL;
+ uint64_t offset = 0;
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
* if the GPU is using the buffer for read too
@@ -265,68 +271,80 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
}
bo->ws->buffer_wait_time += os_time_get_nano() - time;
}
}
/* If the buffer is created from user memory, return the user pointer. */
if (bo->user_ptr)
- return bo->user_ptr;
+ return bo->user_ptr;
+
+ if (bo->bo) {
+ real = bo;
+ } else {
+ real = bo->u.slab.real;
+ offset = bo->va - real->va;
+ }
- r = amdgpu_bo_cpu_map(bo->bo, &cpu);
+ r = amdgpu_bo_cpu_map(real->bo, &cpu);
if (r) {
/* Clear the cache and try again. */
- pb_cache_release_all_buffers(&bo->ws->bo_cache);
- r = amdgpu_bo_cpu_map(bo->bo, &cpu);
+ pb_cache_release_all_buffers(&real->ws->bo_cache);
+ r = amdgpu_bo_cpu_map(real->bo, &cpu);
if (r)
return NULL;
}
- if (p_atomic_inc_return(&bo->map_count) == 1) {
- if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->mapped_vram += bo->base.size;
- else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->mapped_gtt += bo->base.size;
+ if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
+ if (real->initial_domain & RADEON_DOMAIN_VRAM)
+ real->ws->mapped_vram += real->base.size;
+ else if (real->initial_domain & RADEON_DOMAIN_GTT)
+ real->ws->mapped_gtt += real->base.size;
}
- return cpu;
+ return (uint8_t*)cpu + offset;
}
static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
+ struct amdgpu_winsys_bo *real;
if (bo->user_ptr)
return;
- if (p_atomic_dec_zero(&bo->map_count)) {
- if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->mapped_vram -= bo->base.size;
- else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->mapped_gtt -= bo->base.size;
+ real = bo->bo ? bo : bo->u.slab.real;
+
+ if (p_atomic_dec_zero(&real->u.real.map_count)) {
+ if (real->initial_domain & RADEON_DOMAIN_VRAM)
+ real->ws->mapped_vram -= real->base.size;
+ else if (real->initial_domain & RADEON_DOMAIN_GTT)
+ real->ws->mapped_gtt -= real->base.size;
}
- amdgpu_bo_cpu_unmap(bo->bo);
+ amdgpu_bo_cpu_unmap(real->bo);
}
static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
amdgpu_bo_destroy_or_cache
/* other functions are never called */
};
static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
struct amdgpu_winsys *ws = bo->ws;
+ assert(bo->bo);
+
pipe_mutex_lock(ws->global_bo_list_lock);
- LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
+ LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
pipe_mutex_unlock(ws->global_bo_list_lock);
}
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
uint64_t size,
unsigned alignment,
unsigned usage,
enum radeon_bo_domain initial_domain,
unsigned flags,
@@ -339,21 +357,21 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
amdgpu_va_handle va_handle;
unsigned va_gap_size;
int r;
assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo) {
return NULL;
}
- pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base,
+ pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
pb_cache_bucket);
request.alloc_size = size;
request.phys_alignment = alignment;
if (initial_domain & RADEON_DOMAIN_VRAM)
request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
if (initial_domain & RADEON_DOMAIN_GTT)
request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
if (flags & RADEON_FLAG_CPU_ACCESS)
@@ -383,21 +401,21 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
goto error_va_map;
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = alignment;
bo->base.usage = usage;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->ws = ws;
bo->bo = buf_handle;
bo->va = va;
- bo->va_handle = va_handle;
+ bo->u.real.va_handle = va_handle;
bo->initial_domain = initial_domain;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
if (initial_domain & RADEON_DOMAIN_VRAM)
ws->allocated_vram += align64(size, ws->info.gart_page_size);
else if (initial_domain & RADEON_DOMAIN_GTT)
ws->allocated_gtt += align64(size, ws->info.gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
@@ -455,20 +473,22 @@ static unsigned eg_tile_split_rev(unsigned eg_tile_split)
}
static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
struct radeon_bo_metadata *md)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
struct amdgpu_bo_info info = {0};
uint32_t tiling_flags;
int r;
+ assert(bo->bo && "must not be called for slab entries");
+
r = amdgpu_bo_query_info(bo->bo, &info);
if (r)
return;
tiling_flags = info.metadata.tiling_info;
md->microtile = RADEON_LAYOUT_LINEAR;
md->macrotile = RADEON_LAYOUT_LINEAR;
if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
@@ -488,20 +508,22 @@ static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}
static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
struct radeon_bo_metadata *md)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
struct amdgpu_bo_metadata metadata = {0};
uint32_t tiling_flags = 0;
+ assert(bo->bo && "must not be called for slab entries");
+
if (md->macrotile == RADEON_LAYOUT_TILED)
tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
else if (md->microtile == RADEON_LAYOUT_TILED)
tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
else
tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->pipe_config);
tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->bankw));
tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->bankh));
@@ -575,21 +597,21 @@ amdgpu_bo_create(struct radeon_winsys *rws,
pb_cache_bucket);
if (!bo) {
/* Clear the cache and try again. */
pb_cache_release_all_buffers(&ws->bo_cache);
bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
pb_cache_bucket);
if (!bo)
return NULL;
}
- bo->use_reusable_pool = true;
+ bo->u.real.use_reusable_pool = true;
return &bo->base;
}
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
unsigned *stride,
unsigned *offset)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo;
@@ -642,21 +664,21 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
initial |= RADEON_DOMAIN_GTT;
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = info.phys_alignment;
bo->bo = result.buf_handle;
bo->base.size = result.alloc_size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->ws = ws;
bo->va = va;
- bo->va_handle = va_handle;
+ bo->u.real.va_handle = va_handle;
bo->initial_domain = initial;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
bo->is_shared = true;
if (stride)
*stride = whandle->stride;
if (offset)
*offset = whandle->offset;
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
@@ -681,21 +703,26 @@ error:
static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
unsigned stride, unsigned offset,
unsigned slice_size,
struct winsys_handle *whandle)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
enum amdgpu_bo_handle_type type;
int r;
- bo->use_reusable_pool = false;
+ if (!bo->bo) {
+ offset += bo->va - bo->u.slab.real->va;
+ bo = bo->u.slab.real;
+ }
+
+ bo->u.real.use_reusable_pool = false;
switch (whandle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
type = amdgpu_bo_handle_type_gem_flink_name;
break;
case DRM_API_HANDLE_TYPE_FD:
type = amdgpu_bo_handle_type_dma_buf_fd;
break;
case DRM_API_HANDLE_TYPE_KMS:
type = amdgpu_bo_handle_type_kms;
@@ -740,21 +767,21 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
bo->bo = buf_handle;
bo->base.alignment = 0;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->ws = ws;
bo->user_ptr = pointer;
bo->va = va;
- bo->va_handle = va_handle;
+ bo->u.real.va_handle = va_handle;
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
return (struct pb_buffer*)bo;
error_va_map:
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
index 93cc83a..e5b5cf5 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
@@ -28,53 +28,64 @@
/*
* Authors:
* Marek Olšák <maraeo at gmail.com>
*/
#ifndef AMDGPU_BO_H
#define AMDGPU_BO_H
#include "amdgpu_winsys.h"
+#include "pipebuffer/pb_slab.h"
+
struct amdgpu_winsys_bo {
struct pb_buffer base;
- struct pb_cache_entry cache_entry;
+ union {
+ struct {
+ struct pb_cache_entry cache_entry;
+
+ amdgpu_va_handle va_handle;
+ int map_count;
+ bool use_reusable_pool;
+
+ struct list_head global_list_item;
+ } real;
+ struct {
+ struct pb_slab_entry entry;
+ struct amdgpu_winsys_bo *real;
+ } slab;
+ } u;
struct amdgpu_winsys *ws;
void *user_ptr; /* from buffer_from_ptr */
- amdgpu_bo_handle bo;
- int map_count;
+ amdgpu_bo_handle bo; /* NULL for slab entries */
uint32_t unique_id;
- amdgpu_va_handle va_handle;
uint64_t va;
enum radeon_bo_domain initial_domain;
- bool use_reusable_pool;
/* how many command streams is this bo referenced in? */
int num_cs_references;
/* how many command streams, which are being emitted in a separate
* thread, is this bo referenced in? */
volatile int num_active_ioctls;
/* whether buffer_get_handle or buffer_from_handle was called,
* it can only transition from false to true
*/
volatile int is_shared; /* bool (int for atomicity) */
/* Fences for buffer synchronization. */
unsigned num_fences;
unsigned max_fences;
struct pipe_fence_handle **fences;
-
- struct list_head global_list_item;
};
bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf);
void amdgpu_bo_destroy(struct pb_buffer *_buf);
void amdgpu_bo_init_functions(struct amdgpu_winsys *ws);
static inline
struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
return (struct amdgpu_winsys_bo *)bo;
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index d5ea705..6fc47aa 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -916,21 +916,21 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
pipe_mutex_lock(ws->global_bo_list_lock);
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
pipe_mutex_unlock(ws->global_bo_list_lock);
amdgpu_cs_context_cleanup(cs);
cs->error_code = -ENOMEM;
return;
}
- LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
+ LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
assert(num < ws->num_buffers);
handles[num++] = bo->bo;
}
r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
handles, NULL,
&cs->request.resources);
free(handles);
pipe_mutex_unlock(ws->global_bo_list_lock);
} else {
--
2.7.4