[Mesa-dev] [PATCH 7/9] radv: Disable DCC for GENERAL layout and compute transfer dest.

Bas Nieuwenhuizen bas at basnieuwenhuizen.nl
Fri Dec 29 02:06:15 UTC 2017


Apps can use the GENERAL layout for render feedback loops, where
behavior is defined as long as each pixel is rendered only once.
However, DCC fails here, as the granularity of coherence is a block,
not a pixel, so disable it in that case.

This is also going to help with implementing other features.

Even if we later optimize this to only happen when there actually is
a feedback loop (if that is possible at all ...), the machinery is
still useful for excluding images accessible by the SDMA queue once
that is implemented.
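
To make the feedback-loop case concrete, here is a minimal app-side
sketch (illustrative only, not part of this patch; `cmd' and
`feedback_image' are placeholder names) of the kind of GENERAL-layout
transition this change has to handle:

#include <vulkan/vulkan.h>

/* Keep an image that is both sampled and rendered to in the GENERAL
 * layout, as a single-pass render feedback loop requires. */
static void
transition_to_feedback_loop(VkCommandBuffer cmd, VkImage feedback_image)
{
	VkImageMemoryBarrier barrier = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
		.dstAccessMask = VK_ACCESS_SHADER_READ_BIT |
		                 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
		.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
		.newLayout = VK_IMAGE_LAYOUT_GENERAL,
		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.image = feedback_image,
		.subresourceRange = {
			.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
			.levelCount = 1,
			.layerCount = 1,
		},
	};
	vkCmdPipelineBarrier(cmd,
	                     VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
	                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
	                     VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
	                     0, 0, NULL, 0, NULL, 1, &barrier);
}

With this patch, radv_layout_dcc_compressed() returns false for the
GENERAL layout, so the transition above decompresses DCC and the
attachment is then emitted with DCC_ENABLE cleared.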
---
 src/amd/vulkan/radv_cmd_buffer.c   | 29 +++++++++++++++++++++++------
 src/amd/vulkan/radv_image.c        | 12 ++++++++++++
 src/amd/vulkan/radv_meta_resolve.c | 10 ++++++++--
 src/amd/vulkan/radv_private.h      |  4 ++++
 4 files changed, 47 insertions(+), 8 deletions(-)

diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 42468bceed2..c735d201802 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -1184,10 +1184,20 @@ radv_emit_depth_biais(struct radv_cmd_buffer *cmd_buffer)
 static void
 radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
 			 int index,
-			 struct radv_attachment_info *att)
+			 struct radv_attachment_info *att,
+			 struct radv_image *image,
+			 VkImageLayout layout)
 {
 	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
 	struct radv_color_buffer_info *cb = &att->cb;
+	uint32_t cb_color_info = cb->cb_color_info;
+
+	if (!radv_layout_dcc_compressed(image, layout,
+	                                radv_image_queue_family_mask(image,
+	                                                             cmd_buffer->queue_family_index,
+	                                                             cmd_buffer->queue_family_index))) {
+		cb_color_info &= C_028C70_DCC_ENABLE;
+	}
 
 	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
 		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
@@ -1195,7 +1205,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
 		radeon_emit(cmd_buffer->cs, cb->cb_color_base >> 32);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
-		radeon_emit(cmd_buffer->cs, cb->cb_color_info);
+		radeon_emit(cmd_buffer->cs, cb_color_info);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
 		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
@@ -1215,7 +1225,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
 		radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
-		radeon_emit(cmd_buffer->cs, cb->cb_color_info);
+		radeon_emit(cmd_buffer->cs, cb_color_info);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
 		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
 		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
@@ -1461,13 +1471,15 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
 
 		int idx = subpass->color_attachments[i].attachment;
 		struct radv_attachment_info *att = &framebuffer->attachments[idx];
+		struct radv_image *image = att->attachment->image;
+		VkImageLayout layout = subpass->color_attachments[i].layout;
 
 		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
 
 		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
-		radv_emit_fb_color_state(cmd_buffer, i, att);
+		radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
 
-		radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
+		radv_load_color_clear_regs(cmd_buffer, image, i);
 	}
 
 	if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
@@ -3878,7 +3890,12 @@ static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
 					     const VkImageSubresourceRange *range)
 {
 	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
-		radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
+		radv_initialize_dcc(cmd_buffer, image,
+		                    radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask) ?
+		                         0x20202020u : 0xffffffffu);
+	} else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
+	           !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
+		radv_decompress_dcc(cmd_buffer, image, range);
 	} else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
 		   !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
 		radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c
index aaf12bdcb16..6088928df80 100644
--- a/src/amd/vulkan/radv_image.c
+++ b/src/amd/vulkan/radv_image.c
@@ -1113,6 +1113,18 @@ bool radv_layout_can_fast_clear(const struct radv_image *image,
 		queue_mask == (1u << RADV_QUEUE_GENERAL);
 }
 
+bool radv_layout_dcc_compressed(const struct radv_image *image,
+			        VkImageLayout layout,
+			        unsigned queue_mask)
+{
+	/* Don't compress compute transfer dst, as image stores are not supported. */
+	if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
+	    (queue_mask & (1u << RADV_QUEUE_COMPUTE)))
+		return false;
+
+	return image->surface.num_dcc_levels > 0 && layout != VK_IMAGE_LAYOUT_GENERAL;
+}
+
 
 unsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family)
 {
diff --git a/src/amd/vulkan/radv_meta_resolve.c b/src/amd/vulkan/radv_meta_resolve.c
index 26489b7834f..49326fe9d10 100644
--- a/src/amd/vulkan/radv_meta_resolve.c
+++ b/src/amd/vulkan/radv_meta_resolve.c
@@ -315,10 +315,15 @@ enum radv_resolve_method {
 
 static void radv_pick_resolve_method_images(struct radv_image *src_image,
 					    struct radv_image *dest_image,
+					    VkImageLayout dest_image_layout,
+					    struct radv_cmd_buffer *cmd_buffer,
 					    enum radv_resolve_method *method)
 
 {
-	if (dest_image->surface.num_dcc_levels > 0) {
+	uint32_t queue_mask = radv_image_queue_family_mask(dest_image,
+	                                                   cmd_buffer->queue_family_index,
+	                                                   cmd_buffer->queue_family_index);
+	if (radv_layout_dcc_compressed(dest_image, dest_image_layout, queue_mask)) {
 		*method = RESOLVE_FRAGMENT;
 	} else if (dest_image->surface.micro_tile_mode != src_image->surface.micro_tile_mode) {
 		*method = RESOLVE_COMPUTE;
@@ -360,6 +365,7 @@ void radv_CmdResolveImage(
 		resolve_method = RESOLVE_COMPUTE;
 
 	radv_pick_resolve_method_images(src_image, dest_image,
+					dest_image_layout, cmd_buffer,
 					&resolve_method);
 
 	if (resolve_method == RESOLVE_FRAGMENT) {
@@ -577,7 +583,7 @@ radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer *cmd_buffer)
 		struct radv_image *dst_img = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment->image;
 		struct radv_image *src_img = cmd_buffer->state.framebuffer->attachments[src_att.attachment].attachment->image;
 
-		radv_pick_resolve_method_images(dst_img, src_img, &resolve_method);
+		radv_pick_resolve_method_images(dst_img, src_img, dest_att.layout, cmd_buffer, &resolve_method);
 		if (resolve_method == RESOLVE_FRAGMENT) {
 			break;
 		}
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index db8ea895e3a..eb5a64d2536 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -1355,6 +1355,10 @@ bool radv_layout_can_fast_clear(const struct radv_image *image,
 			        VkImageLayout layout,
 			        unsigned queue_mask);
 
+bool radv_layout_dcc_compressed(const struct radv_image *image,
+			        VkImageLayout layout,
+			        unsigned queue_mask);
+
 static inline bool
 radv_vi_dcc_enabled(const struct radv_image *image, unsigned level)
 {
-- 
2.15.1
