[Mesa-dev] [PATCH 3/5] ac/gpu_info: rename has_virtual_memory -> r600_has_virtual_memory
Marek Olšák
maraeo at gmail.com
Thu Mar 22 15:03:44 UTC 2018
From: Marek Olšák <marek.olsak at amd.com>
---
src/amd/common/ac_gpu_info.c | 2 +-
src/amd/common/ac_gpu_info.h | 2 +-
src/gallium/drivers/r600/r600_buffer_common.c | 4 ++--
src/gallium/drivers/r600/r600_cs.h | 2 +-
src/gallium/drivers/r600/r600_pipe_common.c | 6 +++---
src/gallium/winsys/radeon/drm/radeon_drm_bo.c | 12 ++++++------
src/gallium/winsys/radeon/drm/radeon_drm_cs.c | 6 +++---
src/gallium/winsys/radeon/drm/radeon_drm_winsys.c | 16 ++++++++--------
8 files changed, 25 insertions(+), 25 deletions(-)
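
(Reviewer notes, placed between the diffstat and the diff so git am ignores them:
the rename is mechanical and the flag keeps its meaning. The amdgpu path in
ac_query_gpu_info() still hardcodes it to true; only the radeon winsys can clear
it, based on drm_minor and the RADEON_INFO_VA_* queries further down. Below is a
minimal self-contained sketch of the caller-side pattern being renamed, using
stand-in types; apart from the flag name and the zero-address fallback, every
name here is hypothetical and not from the tree.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for struct radeon_info (ac_gpu_info.h) and a winsys buffer;
 * only the renamed flag and the zero fallback come from the tree. */
struct sketch_info { bool r600_has_virtual_memory; };
struct sketch_buf  { uint64_t va; };

/* Mirrors the pattern in r600_alloc_resource(): a GPU virtual address is
 * only meaningful when the kernel exposes GPUVM; without it the CS
 * checker patches offsets through relocations and the address stays 0. */
static uint64_t sketch_gpu_address(const struct sketch_info *info,
                                   const struct sketch_buf *buf)
{
	return info->r600_has_virtual_memory ? buf->va : 0;
}

int main(void)
{
	struct sketch_info info = { .r600_has_virtual_memory = false };
	struct sketch_buf buf = { .va = 1 << 20 };

	printf("gpu_address = 0x%" PRIx64 "\n",
	       sketch_gpu_address(&info, &buf));
	return 0;
}

This is a sketch of the idea only, not code from the tree.)
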
diff --git a/src/amd/common/ac_gpu_info.c b/src/amd/common/ac_gpu_info.c
index ca556a8b11f..8f01038a3a0 100644
--- a/src/amd/common/ac_gpu_info.c
+++ b/src/amd/common/ac_gpu_info.c
@@ -324,21 +324,21 @@ bool ac_query_gpu_info(int fd, amdgpu_device_handle dev,
info->tcc_cache_line_size = 64; /* TC L2 line size on GCN */
if (info->chip_class == GFX9) {
info->num_tile_pipes = 1 << G_0098F8_NUM_PIPES(amdinfo->gb_addr_cfg);
info->pipe_interleave_bytes =
256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(amdinfo->gb_addr_cfg);
} else {
info->num_tile_pipes = cik_get_num_tile_pipes(amdinfo);
info->pipe_interleave_bytes =
256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(amdinfo->gb_addr_cfg);
}
- info->has_virtual_memory = true;
+ info->r600_has_virtual_memory = true;
assert(util_is_power_of_two(dma.available_rings + 1));
assert(util_is_power_of_two(compute.available_rings + 1));
info->num_sdma_rings = util_bitcount(dma.available_rings);
info->num_compute_rings = util_bitcount(compute.available_rings);
/* Get the number of good compute units. */
info->num_good_compute_units = 0;
for (i = 0; i < info->max_se; i++)
diff --git a/src/amd/common/ac_gpu_info.h b/src/amd/common/ac_gpu_info.h
index 3f08b577c4b..22ac015c23a 100644
--- a/src/amd/common/ac_gpu_info.h
+++ b/src/amd/common/ac_gpu_info.h
@@ -54,21 +54,21 @@ struct radeon_info {
uint32_t gart_page_size;
uint64_t gart_size;
uint64_t vram_size;
uint64_t vram_vis_size;
unsigned gds_size;
unsigned gds_gfx_partition_size;
uint64_t max_alloc_size;
uint32_t min_alloc_size;
uint32_t address32_hi;
bool has_dedicated_vram;
- bool has_virtual_memory;
+ bool r600_has_virtual_memory;
bool gfx_ib_pad_with_type2;
bool has_hw_decode;
unsigned ib_start_alignment;
uint32_t num_sdma_rings;
uint32_t num_compute_rings;
uint32_t uvd_fw_version;
uint32_t vce_fw_version;
bool uvd_enc_supported;
uint32_t me_fw_version;
uint32_t me_fw_feature;
diff --git a/src/gallium/drivers/r600/r600_buffer_common.c b/src/gallium/drivers/r600/r600_buffer_common.c
index ca19af9b2ef..17a8c3a596f 100644
--- a/src/gallium/drivers/r600/r600_buffer_common.c
+++ b/src/gallium/drivers/r600/r600_buffer_common.c
@@ -199,21 +199,21 @@ bool r600_alloc_resource(struct r600_common_screen *rscreen,
return false;
}
/* Replace the pointer such that if res->buf wasn't NULL, it won't be
* NULL. This should prevent crashes with multiple contexts using
* the same buffer where one of the contexts invalidates it while
* the others are using it. */
old_buf = res->buf;
res->buf = new_buf; /* should be atomic */
- if (rscreen->info.has_virtual_memory)
+ if (rscreen->info.r600_has_virtual_memory)
res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
else
res->gpu_address = 0;
pb_reference(&old_buf, NULL);
util_range_set_empty(&res->valid_buffer_range);
/* Print debug information. */
if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
@@ -647,21 +647,21 @@ r600_buffer_from_user_memory(struct pipe_screen *screen,
util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);
/* Convert a user pointer to a buffer. */
rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
if (!rbuffer->buf) {
FREE(rbuffer);
return NULL;
}
- if (rscreen->info.has_virtual_memory)
+ if (rscreen->info.r600_has_virtual_memory)
rbuffer->gpu_address =
ws->buffer_get_virtual_address(rbuffer->buf);
else
rbuffer->gpu_address = 0;
rbuffer->vram_usage = 0;
rbuffer->gart_usage = templ->width0;
return &rbuffer->b.b;
}
diff --git a/src/gallium/drivers/r600/r600_cs.h b/src/gallium/drivers/r600/r600_cs.h
index 9c8298a846d..632c7f5f944 100644
--- a/src/gallium/drivers/r600/r600_cs.h
+++ b/src/gallium/drivers/r600/r600_cs.h
@@ -112,21 +112,21 @@ radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
}
static inline void r600_emit_reloc(struct r600_common_context *rctx,
struct r600_ring *ring, struct r600_resource *rbo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority)
{
struct radeon_winsys_cs *cs = ring->cs;
- bool has_vm = ((struct r600_common_screen*)rctx->b.screen)->info.has_virtual_memory;
+ bool has_vm = ((struct r600_common_screen*)rctx->b.screen)->info.r600_has_virtual_memory;
unsigned reloc = radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
if (!has_vm) {
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, reloc);
}
}
static inline void radeon_set_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
diff --git a/src/gallium/drivers/r600/r600_pipe_common.c b/src/gallium/drivers/r600/r600_pipe_common.c
index 026e8dc9868..255a1e01b7f 100644
--- a/src/gallium/drivers/r600/r600_pipe_common.c
+++ b/src/gallium/drivers/r600/r600_pipe_common.c
@@ -120,21 +120,21 @@ void r600_gfx_write_event_eop(struct r600_common_context *ctx,
if (buf)
r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen)
{
unsigned dwords = 6;
- if (!screen->info.has_virtual_memory)
+ if (!screen->info.r600_has_virtual_memory)
dwords += 2;
return dwords;
}
void r600_gfx_wait_fence(struct r600_common_context *ctx,
struct r600_resource *buf,
uint64_t va, uint32_t ref, uint32_t mask)
{
struct radeon_winsys_cs *cs = ctx->gfx.cs;
@@ -304,21 +304,21 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
RADEON_USAGE_READWRITE)) ||
(src &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
RADEON_USAGE_WRITE)))
r600_dma_emit_wait_idle(ctx);
/* If GPUVM is not supported, the CS checker needs 2 entries
* in the buffer list per packet, which has to be done manually.
*/
- if (ctx->screen->info.has_virtual_memory) {
+ if (ctx->screen->info.r600_has_virtual_memory) {
if (dst)
radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
if (src)
radeon_add_to_buffer_list(ctx, &ctx->dma, src,
RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
}
@@ -1349,21 +1349,21 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen,
printf("chip_class = %i\n", rscreen->info.chip_class);
printf("pte_fragment_size = %u\n", rscreen->info.pte_fragment_size);
printf("gart_page_size = %u\n", rscreen->info.gart_page_size);
printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
printf("vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_vis_size, 1024*1024));
printf("max_alloc_size = %i MB\n",
(int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
printf("min_alloc_size = %u\n", rscreen->info.min_alloc_size);
printf("has_dedicated_vram = %u\n", rscreen->info.has_dedicated_vram);
- printf("has_virtual_memory = %i\n", rscreen->info.has_virtual_memory);
+ printf("r600_has_virtual_memory = %i\n", rscreen->info.r600_has_virtual_memory);
printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
printf("has_hw_decode = %u\n", rscreen->info.has_hw_decode);
printf("num_sdma_rings = %i\n", rscreen->info.num_sdma_rings);
printf("num_compute_rings = %u\n", rscreen->info.num_compute_rings);
printf("uvd_fw_version = %u\n", rscreen->info.uvd_fw_version);
printf("vce_fw_version = %u\n", rscreen->info.vce_fw_version);
printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
printf("ce_fw_version = %i\n", rscreen->info.ce_fw_version);
printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index 06842a4fbc8..1617a2fe32e 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -364,21 +364,21 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
if (bo->flink_name) {
util_hash_table_remove(rws->bo_names,
(void*)(uintptr_t)bo->flink_name);
}
mtx_unlock(&rws->bo_handles_mutex);
if (bo->u.real.ptr)
os_munmap(bo->u.real.ptr, bo->base.size);
- if (rws->info.has_virtual_memory) {
+ if (rws->info.r600_has_virtual_memory) {
if (rws->va_unmap_working) {
struct drm_radeon_gem_va va;
va.handle = bo->handle;
va.vm_id = 0;
va.operation = RADEON_VA_UNMAP;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
@@ -672,21 +672,21 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
bo->va = 0;
bo->initial_domain = initial_domains;
bo->hash = __sync_fetch_and_add(&rws->next_bo_hash, 1);
(void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
if (heap >= 0) {
pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
heap);
}
- if (rws->info.has_virtual_memory) {
+ if (rws->info.r600_has_virtual_memory) {
struct drm_radeon_gem_va va;
unsigned va_gap_size;
va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
if (flags & RADEON_FLAG_32BIT) {
bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm32,
size + va_gap_size, alignment);
assert(bo->va + size < rws->vm32.end);
} else {
@@ -967,21 +967,21 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
/* VRAM implies WC. This is not optional. */
if (domain & RADEON_DOMAIN_VRAM)
flags |= RADEON_FLAG_GTT_WC;
/* NO_CPU_ACCESS is valid with VRAM only. */
if (domain != RADEON_DOMAIN_VRAM)
flags &= ~RADEON_FLAG_NO_CPU_ACCESS;
/* Sub-allocate small buffers from slabs. */
if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
- ws->info.has_virtual_memory &&
+ ws->info.r600_has_virtual_memory &&
alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
struct pb_slab_entry *entry;
int heap = radeon_get_heap_index(domain, flags);
if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
goto no_slab;
entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
if (!entry) {
/* Clear the cache and try again. */
@@ -1020,21 +1020,21 @@ no_slab:
bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
0, heap));
if (bo)
return &bo->base;
}
bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
if (!bo) {
/* Clear the cache and try again. */
- if (ws->info.has_virtual_memory)
+ if (ws->info.r600_has_virtual_memory)
pb_slabs_reclaim(&ws->bo_slabs);
pb_cache_release_all_buffers(&ws->bo_cache);
bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
if (!bo)
return NULL;
}
bo->u.real.use_reusable_pool = use_reusable_pool;
mtx_lock(&ws->bo_handles_mutex);
@@ -1082,21 +1082,21 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
bo->user_ptr = pointer;
bo->va = 0;
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
(void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
mtx_unlock(&ws->bo_handles_mutex);
- if (ws->info.has_virtual_memory) {
+ if (ws->info.r600_has_virtual_memory) {
struct drm_radeon_gem_va va;
bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
va.vm_id = 0;
va.offset = bo->va;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
@@ -1225,21 +1225,21 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
done:
mtx_unlock(&ws->bo_handles_mutex);
if (stride)
*stride = whandle->stride;
if (offset)
*offset = whandle->offset;
- if (ws->info.has_virtual_memory && !bo->va) {
+ if (ws->info.r600_has_virtual_memory && !bo->va) {
struct drm_radeon_gem_va va;
bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
va.vm_id = 0;
va.offset = bo->va;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index 92452e47fb1..a1975dff8df 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -238,21 +238,21 @@ static unsigned radeon_lookup_or_add_real_buffer(struct radeon_drm_cs *cs,
/* For async DMA, every add_buffer call must add a buffer to the list
* no matter how many duplicates there are. This is due to the fact
* the DMA CS checker doesn't use NOP packets for offset patching,
* but always uses the i-th buffer from the list to patch the i-th
* offset. If there are N offsets in a DMA CS, there must also be N
* buffers in the relocation list.
*
* This doesn't have to be done if virtual memory is enabled,
* because there is no offset patching with virtual memory.
*/
- if (cs->ring_type != RING_DMA || cs->ws->info.has_virtual_memory) {
+ if (cs->ring_type != RING_DMA || cs->ws->info.r600_has_virtual_memory) {
return i;
}
}
/* New relocation, check if the backing array is large enough. */
if (csc->num_relocs >= csc->max_relocs) {
uint32_t size;
csc->max_relocs = MAX2(csc->max_relocs + 16, (unsigned)(csc->max_relocs * 1.3));
size = csc->max_relocs * sizeof(csc->relocs_bo[0]);
@@ -628,21 +628,21 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
for (i = 0; i < num_relocs; i++) {
/* Update the number of active asynchronous CS ioctls for the buffer. */
p_atomic_inc(&cs->cst->relocs_bo[i].bo->num_active_ioctls);
}
switch (cs->ring_type) {
case RING_DMA:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_DMA;
cs->cst->cs.num_chunks = 3;
- if (cs->ws->info.has_virtual_memory) {
+ if (cs->ws->info.r600_has_virtual_memory) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
}
break;
case RING_UVD:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_UVD;
cs->cst->cs.num_chunks = 3;
break;
@@ -652,21 +652,21 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
cs->cst->cs.num_chunks = 3;
break;
default:
case RING_GFX:
case RING_COMPUTE:
cs->cst->flags[0] = RADEON_CS_KEEP_TILING_FLAGS;
cs->cst->flags[1] = RADEON_CS_RING_GFX;
cs->cst->cs.num_chunks = 3;
- if (cs->ws->info.has_virtual_memory) {
+ if (cs->ws->info.r600_has_virtual_memory) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
cs->cst->cs.num_chunks = 3;
}
if (flags & PIPE_FLUSH_END_OF_FRAME) {
cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
cs->cst->cs.num_chunks = 3;
}
if (cs->ring_type == RING_COMPUTE) {
cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
cs->cst->cs.num_chunks = 3;
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index 036e9861f5f..3ee243adbcc 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -432,36 +432,36 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
/* Default value. */
ws->info.enabled_rb_mask = u_bit_consecutive(0, ws->info.num_render_backends);
/*
* This fails (silently) on non-GCN or older kernels, overwriting the
* default enabled_rb_mask with the result of the last query.
*/
if (ws->gen >= DRV_SI)
radeon_get_drm_value(ws->fd, RADEON_INFO_SI_BACKEND_ENABLED_MASK, NULL,
&ws->info.enabled_rb_mask);
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
if (ws->info.drm_minor >= 13) {
uint32_t ib_vm_max_size;
- ws->info.has_virtual_memory = true;
+ ws->info.r600_has_virtual_memory = true;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
&ws->va_start))
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
&ib_vm_max_size))
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
radeon_get_drm_value(ws->fd, RADEON_INFO_VA_UNMAP_WORKING, NULL,
&ws->va_unmap_working);
}
if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", false))
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
}
/* Get max pipes, this is only needed for compute shaders. All evergreen+
* chips have at least 2 pipes, so we use 2 as a default. */
ws->info.r600_max_quad_pipes = 2;
radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_PIPES, NULL,
&ws->info.r600_max_quad_pipes);
/* All GPUs have at least one compute unit */
ws->info.num_good_compute_units = 1;
@@ -537,21 +537,21 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
if (util_queue_is_initialized(&ws->cs_queue))
util_queue_destroy(&ws->cs_queue);
mtx_destroy(&ws->hyperz_owner_mutex);
mtx_destroy(&ws->cmask_owner_mutex);
- if (ws->info.has_virtual_memory)
+ if (ws->info.r600_has_virtual_memory)
pb_slabs_deinit(&ws->bo_slabs);
pb_cache_deinit(&ws->bo_cache);
if (ws->gen >= DRV_R600) {
radeon_surface_manager_free(ws->surf_man);
}
util_hash_table_destroy(ws->bo_names);
util_hash_table_destroy(ws->bo_handles);
util_hash_table_destroy(ws->bo_vas);
@@ -766,21 +766,21 @@ radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
if (!do_winsys_init(ws))
goto fail1;
pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
500000, ws->check_vm ? 1.0f : 2.0f, 0,
MIN2(ws->info.vram_size, ws->info.gart_size),
radeon_bo_destroy,
radeon_bo_can_reclaim);
- if (ws->info.has_virtual_memory) {
+ if (ws->info.r600_has_virtual_memory) {
/* There is no fundamental obstacle to using slab buffer allocation
* without GPUVM, but enabling it requires making sure that the drivers
* honor the address offset.
*/
if (!pb_slabs_init(&ws->bo_slabs,
RADEON_SLAB_MIN_SIZE_LOG2, RADEON_SLAB_MAX_SIZE_LOG2,
RADEON_MAX_SLAB_HEAPS,
ws,
radeon_bo_can_reclaim_slab,
radeon_bo_slab_alloc,
@@ -870,21 +870,21 @@ radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);
/* We must unlock the mutex once the winsys is fully initialized, so that
* other threads attempting to create the winsys from the same fd will
* get a fully initialized winsys and not just half-way initialized. */
mtx_unlock(&fd_tab_mutex);
return &ws->base;
fail_slab:
- if (ws->info.has_virtual_memory)
+ if (ws->info.r600_has_virtual_memory)
pb_slabs_deinit(&ws->bo_slabs);
fail_cache:
pb_cache_deinit(&ws->bo_cache);
fail1:
mtx_unlock(&fd_tab_mutex);
if (ws->surf_man)
radeon_surface_manager_free(ws->surf_man);
if (ws->fd >= 0)
close(ws->fd);
--
2.15.1
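
P.S. (not part of the patch): the one behavioral subtlety behind this flag is
the DMA relocation rule quoted from radeon_drm_cs.c above. Here is a
self-contained toy of that rule, assuming a plain int handle list; the real
list is csc->relocs_bo and the real function is
radeon_lookup_or_add_real_buffer().

#include <stdbool.h>
#include <stdio.h>

#define MAX_RELOCS 8

/* Sketch of the rule: without GPUVM, the kernel patches the i-th offset
 * of a DMA IB using the i-th relocation entry, so duplicates must be
 * appended, never deduplicated. With GPUVM, or on non-DMA rings,
 * returning an existing entry is fine. */
static int lookup_or_add(int *relocs, int *num_relocs, int handle,
                         bool is_dma_ring, bool has_virtual_memory)
{
	if (!is_dma_ring || has_virtual_memory) {
		for (int i = 0; i < *num_relocs; i++)
			if (relocs[i] == handle)
				return i; /* reuse the existing entry */
	}
	relocs[(*num_relocs)++] = handle; /* DMA without VM: always append */
	return *num_relocs - 1;
}

int main(void)
{
	int relocs[MAX_RELOCS];
	int n = 0;

	/* Two offsets referencing the same buffer need two list entries
	 * on the pre-GPUVM DMA path. */
	int first = lookup_or_add(relocs, &n, 42, true, false);
	int second = lookup_or_add(relocs, &n, 42, true, false);

	printf("entries: %d and %d (n = %d)\n", first, second, n);
	return 0;
}

Without GPUVM on the DMA ring this prints "entries: 0 and 1 (n = 2)"; with
r600_has_virtual_memory set, the second lookup would return 0 instead.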