[Mesa-dev] [PATCH 3/3] radv: Don't use a virtual function for getting the buffer virtual address.

Samuel Pitoiset samuel.pitoiset at gmail.com
Mon Sep 18 08:40:24 UTC 2017


With Dave's comment applied, the series is:

Reviewed-by: Samuel Pitoiset <samuel.pitoiset at gmail.com>
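
For anyone skimming the series: the gist is dropping the buffer_get_va() winsys vtable hook and instead storing the address directly on struct radeon_winsys_bo, read through a trivial inline helper. A minimal before/after sketch, with the names taken straight from the hunks below:

    /* before: one indirect call through the winsys vtable per lookup */
    uint64_t va = device->ws->buffer_get_va(bo);

    /* after: the winsys fills in bo->va once when the buffer is created,
     * and callers read it through an inline accessor. */
    struct radeon_winsys_bo {
    	uint64_t va;
    };

    static inline uint64_t buffer_get_va(struct radeon_winsys_bo *bo)
    {
    	return bo->va;
    }

    uint64_t va = buffer_get_va(bo);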

On 09/17/2017 12:59 PM, Bas Nieuwenhuizen wrote:
> We are really not going to use a winsys which does not need to store
> the va, so we might as well store it in a standard field.
> 
> Not sure this helps perf much though, as most of the cost is in the
> cache miss accessing the bo anyway, which we still need to do.
> ---
>   src/amd/vulkan/radv_cmd_buffer.c              | 56 +++++++++++++--------------
>   src/amd/vulkan/radv_debug.c                   |  2 +-
>   src/amd/vulkan/radv_descriptor_set.c          |  8 ++--
>   src/amd/vulkan/radv_device.c                  | 30 +++++++-------
>   src/amd/vulkan/radv_image.c                   |  6 +--
>   src/amd/vulkan/radv_meta_buffer.c             |  8 ++--
>   src/amd/vulkan/radv_meta_fast_clear.c         |  2 +-
>   src/amd/vulkan/radv_query.c                   | 12 +++---
>   src/amd/vulkan/radv_radeon_winsys.h           | 11 ++++--
>   src/amd/vulkan/si_cmd_buffer.c                |  4 +-
>   src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c | 17 +++-----
>   src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.h |  2 +-
>   src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c | 18 ++++-----
>   13 files changed, 87 insertions(+), 89 deletions(-)
> 
> diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
> index e0a6724c20a..5ac7a978665 100644
> --- a/src/amd/vulkan/radv_cmd_buffer.c
> +++ b/src/amd/vulkan/radv_cmd_buffer.c
> @@ -353,7 +353,7 @@ void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
>   	if (!device->trace_bo)
>   		return;
>   
> -	va = device->ws->buffer_get_va(device->trace_bo);
> +	va = buffer_get_va(device->trace_bo);
>   	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
>   		va += 4;
>   
> @@ -398,7 +398,7 @@ radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
>   	if (!device->trace_bo)
>   		return;
>   
> -	va = device->ws->buffer_get_va(device->trace_bo);
> +	va = buffer_get_va(device->trace_bo);
>   
>   	switch (ring) {
>   	case RING_GFX:
> @@ -432,7 +432,7 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer)
>   	if (!device->trace_bo)
>   		return;
>   
> -	va = device->ws->buffer_get_va(device->trace_bo) + 24;
> +	va = buffer_get_va(device->trace_bo) + 24;
>   
>   	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
>   							   cmd_buffer->cs, 4 + MAX_SETS * 2);
> @@ -616,7 +616,7 @@ radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer,
>   		struct ac_vs_output_info *outinfo)
>   {
>   	struct radeon_winsys *ws = cmd_buffer->device->ws;
> -	uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
> +	uint64_t va = buffer_get_va(shader->bo) + shader->bo_offset;
>   	unsigned export_count;
>   
>   	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
> @@ -666,7 +666,7 @@ radv_emit_hw_es(struct radv_cmd_buffer *cmd_buffer,
>   		struct ac_es_output_info *outinfo)
>   {
>   	struct radeon_winsys *ws = cmd_buffer->device->ws;
> -	uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
> +	uint64_t va = buffer_get_va(shader->bo) + shader->bo_offset;
>   
>   	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
>   	radv_emit_prefetch(cmd_buffer, va, shader->code_size);
> @@ -685,7 +685,7 @@ radv_emit_hw_ls(struct radv_cmd_buffer *cmd_buffer,
>   		struct radv_shader_variant *shader)
>   {
>   	struct radeon_winsys *ws = cmd_buffer->device->ws;
> -	uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
> +	uint64_t va = buffer_get_va(shader->bo) + shader->bo_offset;
>   	uint32_t rsrc2 = shader->rsrc2;
>   
>   	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
> @@ -710,7 +710,7 @@ radv_emit_hw_hs(struct radv_cmd_buffer *cmd_buffer,
>   		struct radv_shader_variant *shader)
>   {
>   	struct radeon_winsys *ws = cmd_buffer->device->ws;
> -	uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
> +	uint64_t va = buffer_get_va(shader->bo) + shader->bo_offset;
>   
>   	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
>   	radv_emit_prefetch(cmd_buffer, va, shader->code_size);
> @@ -845,7 +845,7 @@ radv_emit_geometry_shader(struct radv_cmd_buffer *cmd_buffer,
>   			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
>   			       S_028B90_ENABLE(gs_num_invocations > 0));
>   
> -	va = ws->buffer_get_va(gs->bo) + gs->bo_offset;
> +	va = buffer_get_va(gs->bo) + gs->bo_offset;
>   	ws->cs_add_buffer(cmd_buffer->cs, gs->bo, 8);
>   	radv_emit_prefetch(cmd_buffer, va, gs->code_size);
>   
> @@ -886,7 +886,7 @@ radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
>   	assert (pipeline->shaders[MESA_SHADER_FRAGMENT]);
>   
>   	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
> -	va = ws->buffer_get_va(ps->bo) + ps->bo_offset;
> +	va = buffer_get_va(ps->bo) + ps->bo_offset;
>   	ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);
>   	radv_emit_prefetch(cmd_buffer, va, ps->code_size);
>   
> @@ -1196,7 +1196,7 @@ radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
>   			  VkClearDepthStencilValue ds_clear_value,
>   			  VkImageAspectFlags aspects)
>   {
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
> +	uint64_t va = buffer_get_va(image->bo);
>   	va += image->offset + image->clear_value_offset;
>   	unsigned reg_offset = 0, reg_count = 0;
>   
> @@ -1236,7 +1236,7 @@ static void
>   radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
>   			   struct radv_image *image)
>   {
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
> +	uint64_t va = buffer_get_va(image->bo);
>   	va += image->offset + image->clear_value_offset;
>   
>   	if (!image->surface.htile_size)
> @@ -1268,7 +1268,7 @@ radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
>   				  bool value)
>   {
>   	uint64_t pred_val = value;
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
> +	uint64_t va = buffer_get_va(image->bo);
>   	va += image->offset + image->dcc_pred_offset;
>   
>   	if (!image->surface.dcc_size)
> @@ -1292,7 +1292,7 @@ radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
>   			  int idx,
>   			  uint32_t color_values[2])
>   {
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
> +	uint64_t va = buffer_get_va(image->bo);
>   	va += image->offset + image->clear_value_offset;
>   
>   	if (!image->cmask.size && !image->surface.dcc_size)
> @@ -1319,7 +1319,7 @@ radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
>   			   struct radv_image *image,
>   			   int idx)
>   {
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
> +	uint64_t va = buffer_get_va(image->bo);
>   	va += image->offset + image->clear_value_offset;
>   
>   	if (!image->cmask.size && !image->surface.dcc_size)
> @@ -1522,7 +1522,7 @@ radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer)
>   					 &bo_offset))
>   		return;
>   
> -	set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
> +	set->va = buffer_get_va(cmd_buffer->upload.upload_bo);
>   	set->va += bo_offset;
>   }
>   
> @@ -1547,7 +1547,7 @@ radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer)
>   		uptr[1] = set_va >> 32;
>   	}
>   
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
> +	uint64_t va = buffer_get_va(cmd_buffer->upload.upload_bo);
>   	va += offset;
>   
>   	if (cmd_buffer->state.pipeline) {
> @@ -1636,7 +1636,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
>   	memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
>   	       16 * layout->dynamic_offset_count);
>   
> -	va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
> +	va = buffer_get_va(cmd_buffer->upload.upload_bo);
>   	va += offset;
>   
>   	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
> @@ -1708,7 +1708,7 @@ radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer)
>   			uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
>   
>   			device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
> -			va = device->ws->buffer_get_va(buffer->bo);
> +			va = buffer_get_va(buffer->bo);
>   
>   			offset = cmd_buffer->state.vertex_bindings[vb].offset + velems->offset[i];
>   			va += offset + buffer->offset;
> @@ -1721,7 +1721,7 @@ radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer)
>   			desc[3] = velems->rsrc_word3[i];
>   		}
>   
> -		va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
> +		va = buffer_get_va(cmd_buffer->upload.upload_bo);
>   		va += vb_offset;
>   
>   		radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
> @@ -2073,7 +2073,7 @@ static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
>   {
>   	struct radv_device *device = cmd_buffer->device;
>   	if (device->gfx_init) {
> -		uint64_t va = device->ws->buffer_get_va(device->gfx_init);
> +		uint64_t va = buffer_get_va(device->gfx_init);
>   		device->ws->cs_add_buffer(cmd_buffer->cs, device->gfx_init, 8);
>   		radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
>   		radeon_emit(cmd_buffer->cs, va);
> @@ -2164,7 +2164,7 @@ void radv_CmdBindIndexBuffer(
>   	RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
>   
>   	cmd_buffer->state.index_type = indexType; /* vk matches hw */
> -	cmd_buffer->state.index_va = cmd_buffer->device->ws->buffer_get_va(index_buffer->bo);
> +	cmd_buffer->state.index_va = buffer_get_va(index_buffer->bo);
>   	cmd_buffer->state.index_va += index_buffer->offset + offset;
>   
>   	int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
> @@ -2286,7 +2286,7 @@ void radv_meta_push_descriptor_set(
>   	                                  (void**) &push_set->mapped_ptr))
>   		return;
>   
> -	push_set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
> +	push_set->va = buffer_get_va(cmd_buffer->upload.upload_bo);
>   	push_set->va += bo_offset;
>   
>   	radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
> @@ -2390,7 +2390,7 @@ radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
>   	cmd_buffer->state.emitted_compute_pipeline = pipeline;
>   
>   	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
> -	va = ws->buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
> +	va = buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
>   
>   	ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
>   	radv_emit_prefetch(cmd_buffer, va, compute_shader->code_size);
> @@ -2987,12 +2987,12 @@ radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
>   	RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
>   	struct radeon_winsys_cs *cs = cmd_buffer->cs;
>   
> -	uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
> +	uint64_t indirect_va = buffer_get_va(buffer->bo);
>   	indirect_va += offset + buffer->offset;
>   	uint64_t count_va = 0;
>   
>   	if (count_buffer) {
> -		count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
> +		count_va = buffer_get_va(count_buffer->bo);
>   		count_va += count_offset + count_buffer->offset;
>   	}
>   
> @@ -3178,7 +3178,7 @@ void radv_CmdDispatchIndirect(
>   {
>   	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
>   	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
> +	uint64_t va = buffer_get_va(buffer->bo);
>   	va += buffer->offset + offset;
>   
>   	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
> @@ -3549,7 +3549,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
>   			unsigned value)
>   {
>   	struct radeon_winsys_cs *cs = cmd_buffer->cs;
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
> +	uint64_t va = buffer_get_va(event->bo);
>   
>   	cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
>   
> @@ -3605,7 +3605,7 @@ void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
>   
>   	for (unsigned i = 0; i < eventCount; ++i) {
>   		RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
> -		uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
> +		uint64_t va = buffer_get_va(event->bo);
>   
>   		cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
>   
> diff --git a/src/amd/vulkan/radv_debug.c b/src/amd/vulkan/radv_debug.c
> index f6f4dad65ca..98d4ee2a8d1 100644
> --- a/src/amd/vulkan/radv_debug.c
> +++ b/src/amd/vulkan/radv_debug.c
> @@ -380,7 +380,7 @@ radv_dump_annotated_shader(struct radv_pipeline *pipeline,
>   	if (!shader)
>   		return;
>   
> -	start_addr = device->ws->buffer_get_va(shader->bo) + shader->bo_offset;
> +	start_addr = buffer_get_va(shader->bo) + shader->bo_offset;
>   	end_addr = start_addr + shader->code_size;
>   
>   	/* See if any wave executes the shader. */
> diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c
> index 5b9cfe66331..99d4f5bcaeb 100644
> --- a/src/amd/vulkan/radv_descriptor_set.c
> +++ b/src/amd/vulkan/radv_descriptor_set.c
> @@ -301,7 +301,7 @@ radv_descriptor_set_create(struct radv_device *device,
>   		if (pool->current_offset + layout_size <= pool->size) {
>   			set->bo = pool->bo;
>   			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
> -			set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
> +			set->va = buffer_get_va(set->bo) + pool->current_offset;
>   			pool->current_offset += layout_size;
>   			list_addtail(&set->vram_list, &pool->vram_list);
>   		} else if (!pool->host_memory_base) {
> @@ -325,7 +325,7 @@ radv_descriptor_set_create(struct radv_device *device,
>   			}
>   			set->bo = pool->bo;
>   			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
> -			set->va = device->ws->buffer_get_va(set->bo) + offset;
> +			set->va = buffer_get_va(set->bo) + offset;
>   			list_add(&set->vram_list, prev);
>   		} else
>   			return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
> @@ -560,7 +560,7 @@ static void write_buffer_descriptor(struct radv_device *device,
>                                       const VkDescriptorBufferInfo *buffer_info)
>   {
>   	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
> -	uint64_t va = device->ws->buffer_get_va(buffer->bo);
> +	uint64_t va = buffer_get_va(buffer->bo);
>   	uint32_t range = buffer_info->range;
>   
>   	if (buffer_info->range == VK_WHOLE_SIZE)
> @@ -589,7 +589,7 @@ static void write_dynamic_buffer_descriptor(struct radv_device *device,
>                                               const VkDescriptorBufferInfo *buffer_info)
>   {
>   	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
> -	uint64_t va = device->ws->buffer_get_va(buffer->bo);
> +	uint64_t va = buffer_get_va(buffer->bo);
>   	unsigned size = buffer_info->range;
>   
>   	if (buffer_info->range == VK_WHOLE_SIZE)
> diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
> index e6d595dfbe5..039c59a438d 100644
> --- a/src/amd/vulkan/radv_device.c
> +++ b/src/amd/vulkan/radv_device.c
> @@ -1377,13 +1377,13 @@ fill_geom_tess_rings(struct radv_queue *queue,
>   	uint32_t *desc = &map[4];
>   
>   	if (esgs_ring_bo)
> -		esgs_va = queue->device->ws->buffer_get_va(esgs_ring_bo);
> +		esgs_va = buffer_get_va(esgs_ring_bo);
>   	if (gsvs_ring_bo)
> -		gsvs_va = queue->device->ws->buffer_get_va(gsvs_ring_bo);
> +		gsvs_va = buffer_get_va(gsvs_ring_bo);
>   	if (tess_factor_ring_bo)
> -		tess_factor_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
> +		tess_factor_va = buffer_get_va(tess_factor_ring_bo);
>   	if (tess_offchip_ring_bo)
> -		tess_offchip_va = queue->device->ws->buffer_get_va(tess_offchip_ring_bo);
> +		tess_offchip_va = buffer_get_va(tess_offchip_ring_bo);
>   
>   	/* stride 0, num records - size, add tid, swizzle, elsize4,
>   	   index stride 64 */
> @@ -1730,7 +1730,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
>   			uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
>   
>   			if (scratch_bo) {
> -				uint64_t scratch_va = queue->device->ws->buffer_get_va(scratch_bo);
> +				uint64_t scratch_va = buffer_get_va(scratch_bo);
>   				uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
>   				                 S_008F04_SWIZZLE_ENABLE(1);
>   				map[0] = scratch_va;
> @@ -1768,7 +1768,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
>   		}
>   
>   		if (tess_factor_ring_bo) {
> -			uint64_t tf_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
> +			uint64_t tf_va = buffer_get_va(tess_factor_ring_bo);
>   			if (queue->device->physical_device->rad_info.chip_class >= CIK) {
>   				radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
>   						       S_030938_SIZE(tess_factor_ring_size / 4));
> @@ -1797,7 +1797,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
>   			                   R_00B430_SPI_SHADER_USER_DATA_HS_0,
>   			                   R_00B530_SPI_SHADER_USER_DATA_LS_0};
>   
> -			uint64_t va = queue->device->ws->buffer_get_va(descriptor_bo);
> +			uint64_t va = buffer_get_va(descriptor_bo);
>   
>   			for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
>   				radeon_set_sh_reg_seq(cs, regs[i], 2);
> @@ -1807,7 +1807,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
>   		}
>   
>   		if (compute_scratch_bo) {
> -			uint64_t scratch_va = queue->device->ws->buffer_get_va(compute_scratch_bo);
> +			uint64_t scratch_va = buffer_get_va(compute_scratch_bo);
>   			uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
>   			                 S_008F04_SWIZZLE_ENABLE(1);
>   
> @@ -2955,7 +2955,7 @@ radv_initialise_color_surface(struct radv_device *device,
>   	/* Intensity is implemented as Red, so treat it that way. */
>   	cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
>   
> -	va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
> +	va = buffer_get_va(iview->bo) + iview->image->offset;
>   
>   	cb->cb_color_base = va >> 8;
>   
> @@ -3007,11 +3007,11 @@ radv_initialise_color_surface(struct radv_device *device,
>   	}
>   
>   	/* CMASK variables */
> -	va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
> +	va = buffer_get_va(iview->bo) + iview->image->offset;
>   	va += iview->image->cmask.offset;
>   	cb->cb_color_cmask = va >> 8;
>   
> -	va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
> +	va = buffer_get_va(iview->bo) + iview->image->offset;
>   	va += iview->image->dcc_offset;
>   	cb->cb_dcc_base = va >> 8;
>   	cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
> @@ -3028,7 +3028,7 @@ radv_initialise_color_surface(struct radv_device *device,
>   	}
>   
>   	if (iview->image->fmask.size) {
> -		va = device->ws->buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
> +		va = buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
>   		cb->cb_color_fmask = va >> 8;
>   		cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
>   	} else {
> @@ -3173,7 +3173,7 @@ radv_initialise_ds_surface(struct radv_device *device,
>   	ds->db_htile_data_base = 0;
>   	ds->db_htile_surface = 0;
>   
> -	va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
> +	va = buffer_get_va(iview->bo) + iview->image->offset;
>   	s_offs = z_offs = va;
>   
>   	if (device->physical_device->rad_info.chip_class >= GFX9) {
> @@ -3201,7 +3201,7 @@ radv_initialise_ds_surface(struct radv_device *device,
>   			if (!iview->image->surface.has_stencil)
>   				/* Use all of the htile_buffer for depth if there's no stencil. */
>   				ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
> -			va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
> +			va = buffer_get_va(iview->bo) + iview->image->offset +
>   				iview->image->htile_offset;
>   			ds->db_htile_data_base = va >> 8;
>   			ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
> @@ -3265,7 +3265,7 @@ radv_initialise_ds_surface(struct radv_device *device,
>   				/* Use all of the htile_buffer for depth if there's no stencil. */
>   				ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
>   
> -			va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
> +			va = buffer_get_va(iview->bo) + iview->image->offset +
>   				iview->image->htile_offset;
>   			ds->db_htile_data_base = va >> 8;
>   			ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
> diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c
> index aeec5ce8822..f0dec40c39f 100644
> --- a/src/amd/vulkan/radv_image.c
> +++ b/src/amd/vulkan/radv_image.c
> @@ -190,7 +190,7 @@ radv_make_buffer_descriptor(struct radv_device *device,
>   {
>   	const struct vk_format_description *desc;
>   	unsigned stride;
> -	uint64_t gpu_address = device->ws->buffer_get_va(buffer->bo);
> +	uint64_t gpu_address = buffer_get_va(buffer->bo);
>   	uint64_t va = gpu_address + buffer->offset;
>   	unsigned num_format, data_format;
>   	int first_non_void;
> @@ -227,7 +227,7 @@ si_set_mutable_tex_desc_fields(struct radv_device *device,
>   			       unsigned block_width, bool is_stencil,
>   			       uint32_t *state)
>   {
> -	uint64_t gpu_address = image->bo ? device->ws->buffer_get_va(image->bo) + image->offset : 0;
> +	uint64_t gpu_address = image->bo ? buffer_get_va(image->bo) + image->offset : 0;
>   	uint64_t va = gpu_address;
>   	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
>   	uint64_t meta_va = 0;
> @@ -468,7 +468,7 @@ si_make_texture_descriptor(struct radv_device *device,
>   	/* Initialize the sampler view for FMASK. */
>   	if (image->fmask.size) {
>   		uint32_t fmask_format, num_format;
> -		uint64_t gpu_address = device->ws->buffer_get_va(image->bo);
> +		uint64_t gpu_address = buffer_get_va(image->bo);
>   		uint64_t va;
>   
>   		va = gpu_address + image->offset + image->fmask.offset;
> diff --git a/src/amd/vulkan/radv_meta_buffer.c b/src/amd/vulkan/radv_meta_buffer.c
> index a8a41e05fa3..864cf710d18 100644
> --- a/src/amd/vulkan/radv_meta_buffer.c
> +++ b/src/amd/vulkan/radv_meta_buffer.c
> @@ -418,7 +418,7 @@ void radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer,
>   	if (size >= 4096)
>   		fill_buffer_shader(cmd_buffer, bo, offset, size, value);
>   	else if (size) {
> -		uint64_t va = cmd_buffer->device->ws->buffer_get_va(bo);
> +		uint64_t va = buffer_get_va(bo);
>   		va += offset;
>   		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
>   		si_cp_dma_clear_buffer(cmd_buffer, va, size, value);
> @@ -436,8 +436,8 @@ void radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer,
>   		copy_buffer_shader(cmd_buffer, src_bo, dst_bo,
>   				   src_offset, dst_offset, size);
>   	else if (size) {
> -		uint64_t src_va = cmd_buffer->device->ws->buffer_get_va(src_bo);
> -		uint64_t dst_va = cmd_buffer->device->ws->buffer_get_va(dst_bo);
> +		uint64_t src_va = buffer_get_va(src_bo);
> +		uint64_t dst_va = buffer_get_va(dst_bo);
>   		src_va += src_offset;
>   		dst_va += dst_offset;
>   
> @@ -497,7 +497,7 @@ void radv_CmdUpdateBuffer(
>   	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
>   	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
>   	uint64_t words = dataSize / 4;
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
> +	uint64_t va = buffer_get_va(dst_buffer->bo);
>   	va += dstOffset + dst_buffer->offset;
>   
>   	assert(!(dataSize & 3));
> diff --git a/src/amd/vulkan/radv_meta_fast_clear.c b/src/amd/vulkan/radv_meta_fast_clear.c
> index 27f8c160c06..693f010c712 100644
> --- a/src/amd/vulkan/radv_meta_fast_clear.c
> +++ b/src/amd/vulkan/radv_meta_fast_clear.c
> @@ -341,7 +341,7 @@ radv_emit_set_predication_state_from_image(struct radv_cmd_buffer *cmd_buffer,
>   	uint64_t va = 0;
>   
>   	if (value) {
> -		va = cmd_buffer->device->ws->buffer_get_va(image->bo) + image->offset;
> +		va = buffer_get_va(image->bo) + image->offset;
>   		va += image->dcc_pred_offset;
>   	}
>   
> diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
> index 3c40774042d..4b3fca9603e 100644
> --- a/src/amd/vulkan/radv_query.c
> +++ b/src/amd/vulkan/radv_query.c
> @@ -952,8 +952,8 @@ void radv_CmdCopyQueryPoolResults(
>   	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
>   	struct radeon_winsys_cs *cs = cmd_buffer->cs;
>   	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
> -	uint64_t dest_va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
> +	uint64_t va = buffer_get_va(pool->bo);
> +	uint64_t dest_va = buffer_get_va(dst_buffer->bo);
>   	dest_va += dst_buffer->offset + dstOffset;
>   
>   	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
> @@ -1057,7 +1057,7 @@ void radv_CmdResetQueryPool(
>   {
>   	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
>   	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
> +	uint64_t va = buffer_get_va(pool->bo);
>   
>   	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
>   
> @@ -1078,7 +1078,7 @@ void radv_CmdBeginQuery(
>   	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
>   	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
>   	struct radeon_winsys_cs *cs = cmd_buffer->cs;
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
> +	uint64_t va = buffer_get_va(pool->bo);
>   	va += pool->stride * query;
>   
>   	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
> @@ -1118,7 +1118,7 @@ void radv_CmdEndQuery(
>   	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
>   	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
>   	struct radeon_winsys_cs *cs = cmd_buffer->cs;
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
> +	uint64_t va = buffer_get_va(pool->bo);
>   	uint64_t avail_va = va + pool->availability_offset + 4 * query;
>   	va += pool->stride * query;
>   
> @@ -1170,7 +1170,7 @@ void radv_CmdWriteTimestamp(
>   	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
>   	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
>   	struct radeon_winsys_cs *cs = cmd_buffer->cs;
> -	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
> +	uint64_t va = buffer_get_va(pool->bo);
>   	uint64_t avail_va = va + pool->availability_offset + 4 * query;
>   	uint64_t query_va = va + pool->stride * query;
>   
> diff --git a/src/amd/vulkan/radv_radeon_winsys.h b/src/amd/vulkan/radv_radeon_winsys.h
> index f7399de1aed..21fd4626928 100644
> --- a/src/amd/vulkan/radv_radeon_winsys.h
> +++ b/src/amd/vulkan/radv_radeon_winsys.h
> @@ -133,9 +133,11 @@ struct radeon_bo_metadata {
>   };
>   
>   uint32_t syncobj_handle;
> -struct radeon_winsys_bo;
>   struct radeon_winsys_fence;
>   
> +struct radeon_winsys_bo {
> +	uint64_t va;
> +};
>   struct radv_winsys_sem_counts {
>   	uint32_t syncobj_count;
>   	uint32_t sem_count;
> @@ -180,8 +182,6 @@ struct radeon_winsys {
>   
>   	void (*buffer_unmap)(struct radeon_winsys_bo *bo);
>   
> -	uint64_t (*buffer_get_va)(struct radeon_winsys_bo *bo);
> -
>   	void (*buffer_set_metadata)(struct radeon_winsys_bo *bo,
>   				    struct radeon_bo_metadata *md);
>   
> @@ -263,4 +263,9 @@ static inline void radeon_emit_array(struct radeon_winsys_cs *cs,
>   	cs->cdw += count;
>   }
>   
> +static inline uint64_t buffer_get_va(struct radeon_winsys_bo *bo)
> +{
> +	return bo->va;
> +}
> +
>   #endif /* RADV_RADEON_WINSYS_H */
> diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
> index 9f8d881d272..fd4142be251 100644
> --- a/src/amd/vulkan/si_cmd_buffer.c
> +++ b/src/amd/vulkan/si_cmd_buffer.c
> @@ -1060,7 +1060,7 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
>   	uint32_t *ptr = NULL;
>   	uint64_t va = 0;
>   	if (chip_class == GFX9) {
> -		va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
> +		va = buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
>   		ptr = &cmd_buffer->gfx9_fence_idx;
>   	}
>   	si_cs_emit_cache_flush(cmd_buffer->cs,
> @@ -1239,7 +1239,7 @@ static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigne
>   
>   	radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT,  &offset, &ptr);
>   
> -	va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
> +	va = buffer_get_va(cmd_buffer->upload.upload_bo);
>   	va += offset;
>   
>   	si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);
> diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
> index c8b67a095b7..d910aae4ba9 100644
> --- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
> +++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
> @@ -67,7 +67,7 @@ radv_amdgpu_winsys_virtual_map(struct radv_amdgpu_winsys_bo *bo,
>   
>   	p_atomic_inc(&range->bo->ref_count);
>   	int r = radv_amdgpu_bo_va_op(bo->ws->dev, range->bo->bo, range->bo_offset, range->size,
> -				     range->offset + bo->va, 0, AMDGPU_VA_OP_MAP);
> +				     range->offset + bo->base.va, 0, AMDGPU_VA_OP_MAP);
>   	if (r)
>   		abort();
>   }
> @@ -82,7 +82,7 @@ radv_amdgpu_winsys_virtual_unmap(struct radv_amdgpu_winsys_bo *bo,
>   		return; /* TODO: PRT mapping */
>   
>   	int r = radv_amdgpu_bo_va_op(bo->ws->dev, range->bo->bo, range->bo_offset, range->size,
> -				     range->offset + bo->va, 0, AMDGPU_VA_OP_UNMAP);
> +				     range->offset + bo->base.va, 0, AMDGPU_VA_OP_UNMAP);
>   	if (r)
>   		abort();
>   	radv_amdgpu_winsys_bo_destroy((struct radeon_winsys_bo *)range->bo);
> @@ -252,7 +252,7 @@ static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo)
>   			bo->ws->num_buffers--;
>   			pthread_mutex_unlock(&bo->ws->global_bo_list_lock);
>   		}
> -		radv_amdgpu_bo_va_op(bo->ws->dev, bo->bo, 0, bo->size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
> +		radv_amdgpu_bo_va_op(bo->ws->dev, bo->bo, 0, bo->size, bo->base.va, 0, AMDGPU_VA_OP_UNMAP);
>   		amdgpu_bo_free(bo->bo);
>   	}
>   	amdgpu_va_range_free(bo->va_handle);
> @@ -295,7 +295,7 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws,
>   	if (r)
>   		goto error_va_alloc;
>   
> -	bo->va = va;
> +	bo->base.va = va;
>   	bo->va_handle = va_handle;
>   	bo->size = size;
>   	bo->ws = ws;
> @@ -367,12 +367,6 @@ error_va_alloc:
>   	return NULL;
>   }
>   
> -static uint64_t radv_amdgpu_winsys_bo_get_va(struct radeon_winsys_bo *_bo)
> -{
> -	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
> -	return bo->va;
> -}
> -
>   static void *
>   radv_amdgpu_winsys_bo_map(struct radeon_winsys_bo *_bo)
>   {
> @@ -433,7 +427,7 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws,
>   		initial |= RADEON_DOMAIN_GTT;
>   
>   	bo->bo = result.buf_handle;
> -	bo->va = va;
> +	bo->base.va = va;
>   	bo->va_handle = va_handle;
>   	bo->initial_domain = initial;
>   	bo->size = result.alloc_size;
> @@ -527,7 +521,6 @@ void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws)
>   {
>   	ws->base.buffer_create = radv_amdgpu_winsys_bo_create;
>   	ws->base.buffer_destroy = radv_amdgpu_winsys_bo_destroy;
> -	ws->base.buffer_get_va = radv_amdgpu_winsys_bo_get_va;
>   	ws->base.buffer_map = radv_amdgpu_winsys_bo_map;
>   	ws->base.buffer_unmap = radv_amdgpu_winsys_bo_unmap;
>   	ws->base.buffer_from_fd = radv_amdgpu_winsys_bo_from_fd;
> diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.h b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.h
> index 4512e76b333..f32e4308386 100644
> --- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.h
> +++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.h
> @@ -40,8 +40,8 @@ struct radv_amdgpu_map_range {
>   };
>   
>   struct radv_amdgpu_winsys_bo {
> +	struct radeon_winsys_bo base;
>   	amdgpu_va_handle va_handle;
> -	uint64_t va;
>   	uint64_t size;
>   	struct radv_amdgpu_winsys *ws;
>   	bool is_virtual;
> diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
> index 4a9ecab657f..74ffb36d618 100644
> --- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
> +++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
> @@ -215,7 +215,7 @@ radv_amdgpu_cs_create(struct radeon_winsys *ws,
>   			return NULL;
>   		}
>   
> -		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
> +		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
>   		cs->base.buf = (uint32_t *)cs->ib_mapped;
>   		cs->base.max_dw = ib_size / 4 - 4;
>   		cs->ib_size_ptr = &cs->ib.size;
> @@ -306,8 +306,8 @@ static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
>   	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
>   
>   	cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
> -	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
> -	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va >> 32;
> +	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
> +	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
>   	cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
>   	cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);
>   
> @@ -360,7 +360,7 @@ static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
>   			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
>   
>   		cs->num_old_ib_buffers = 0;
> -		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
> +		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
>   		cs->ib_size_ptr = &cs->ib.size;
>   		cs->ib.size = 0;
>   	}
> @@ -886,7 +886,7 @@ static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
>   		}
>   
>   		ib.size = size;
> -		ib.ib_mc_address = ws->buffer_get_va(bo);
> +		ib.ib_mc_address = buffer_get_va(bo);
>   
>   		request.ip_type = cs0->hw_ip;
>   		request.ring = queue_idx;
> @@ -964,19 +964,19 @@ static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
>   
>   		bo = (struct radv_amdgpu_winsys_bo*)
>   		       (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
> -		if (addr >= bo->va && addr - bo->va < bo->size) {
> +		if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
>   			if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
> -				return (char *)ret + (addr - bo->va);
> +				return (char *)ret + (addr - bo->base.va);
>   		}
>   	}
>   	if(cs->ws->debug_all_bos) {
>   		pthread_mutex_lock(&cs->ws->global_bo_list_lock);
>   		list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
>   		                    &cs->ws->global_bo_list, global_list_item) {
> -			if (addr >= bo->va && addr - bo->va < bo->size) {
> +			if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
>   				if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
>   					pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
> -					return (char *)ret + (addr - bo->va);
> +					return (char *)ret + (addr - bo->base.va);
>   				}
>   			}
>   		}
> 

