[Mesa-dev] [PATCH 12/17] anv/cmd_buffer: Move dirty bits into anv_cmd_*_state
Jason Ekstrand
jason at jlekstrand.net
Sat Dec 16 01:09:10 UTC 2017
---
src/intel/vulkan/anv_cmd_buffer.c | 24 +++++++++++------------
src/intel/vulkan/anv_private.h | 6 ++++--
src/intel/vulkan/gen7_cmd_buffer.c | 28 +++++++++++++-------------
src/intel/vulkan/gen8_cmd_buffer.c | 40 +++++++++++++++++++-------------------
src/intel/vulkan/genX_blorp_exec.c | 2 +-
src/intel/vulkan/genX_cmd_buffer.c | 20 +++++++++----------
src/intel/vulkan/genX_gpu_memcpy.c | 2 +-
7 files changed, 62 insertions(+), 60 deletions(-)
diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
index 9720e7e..ad5baee 100644
--- a/src/intel/vulkan/anv_cmd_buffer.c
+++ b/src/intel/vulkan/anv_cmd_buffer.c
@@ -345,7 +345,7 @@ void anv_CmdBindPipeline(
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_COMPUTE:
cmd_buffer->state.compute.base.pipeline = pipeline;
- cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.compute.pipeline_dirty = true;
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
break;
@@ -353,12 +353,12 @@ void anv_CmdBindPipeline(
case VK_PIPELINE_BIND_POINT_GRAPHICS:
cmd_buffer->state.gfx.base.pipeline = pipeline;
cmd_buffer->state.vb_dirty |= pipeline->vb_used;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
/* Apply the dynamic state from the pipeline */
- cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
+ cmd_buffer->state.gfx.dirty |= pipeline->dynamic_state_mask;
anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
&pipeline->dynamic_state,
pipeline->dynamic_state_mask);
@@ -385,7 +385,7 @@ void anv_CmdSetViewport(
memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
pViewports, viewportCount * sizeof(*pViewports));
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
@@ -403,7 +403,7 @@ void anv_CmdSetScissor(
memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
pScissors, scissorCount * sizeof(*pScissors));
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
@@ -413,7 +413,7 @@ void anv_CmdSetLineWidth(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer->state.dynamic.line_width = lineWidth;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
@@ -428,7 +428,7 @@ void anv_CmdSetDepthBias(
cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
@@ -440,7 +440,7 @@ void anv_CmdSetBlendConstants(
memcpy(cmd_buffer->state.dynamic.blend_constants,
blendConstants, sizeof(float) * 4);
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
@@ -453,7 +453,7 @@ void anv_CmdSetDepthBounds(
cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
@@ -468,7 +468,7 @@ void anv_CmdSetStencilCompareMask(
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
@@ -483,7 +483,7 @@ void anv_CmdSetStencilWriteMask(
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
@@ -498,7 +498,7 @@ void anv_CmdSetStencilReference(
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
cmd_buffer->state.dynamic.stencil_reference.back = reference;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
static void
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index f1868a3..413f8d5 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -1704,6 +1704,8 @@ struct anv_cmd_pipeline_state {
*/
struct anv_cmd_graphics_state {
struct anv_cmd_pipeline_state base;
+
+ anv_cmd_dirty_mask_t dirty;
};
/** State tracking for compute pipeline
@@ -1715,6 +1717,8 @@ struct anv_cmd_graphics_state {
*/
struct anv_cmd_compute_state {
struct anv_cmd_pipeline_state base;
+
+ bool pipeline_dirty;
};
/** State required while building cmd buffer */
@@ -1727,8 +1731,6 @@ struct anv_cmd_state {
struct anv_cmd_compute_state compute;
uint32_t vb_dirty;
- anv_cmd_dirty_mask_t dirty;
- anv_cmd_dirty_mask_t compute_dirty;
enum anv_pipe_bits pending_pipe_bits;
uint32_t num_workgroups_offset;
struct anv_bo *num_workgroups_bo;
diff --git a/src/intel/vulkan/gen7_cmd_buffer.c b/src/intel/vulkan/gen7_cmd_buffer.c
index 8d7fd8f..f16056a 100644
--- a/src/intel/vulkan/gen7_cmd_buffer.c
+++ b/src/intel/vulkan/gen7_cmd_buffer.c
@@ -113,7 +113,7 @@ void genX(CmdBindIndexBuffer)(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
if (GEN_IS_HASWELL)
cmd_buffer->state.restart_index = restart_index_for_type[indexType];
cmd_buffer->state.gen7.index_buffer = buffer;
@@ -156,10 +156,10 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_RENDER_TARGETS |
- ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
- ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_RENDER_TARGETS |
+ ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
+ ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
uint32_t sf_dw[GENX(3DSTATE_SF_length)];
struct GENX(3DSTATE_SF) sf = {
GENX(3DSTATE_SF_header),
@@ -174,8 +174,8 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
}
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
struct anv_state cc_state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
@@ -197,10 +197,10 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
}
}
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_RENDER_TARGETS |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_RENDER_TARGETS |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];
struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
@@ -229,8 +229,8 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
}
if (cmd_buffer->state.gen7.index_buffer &&
- cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_INDEX_BUFFER)) {
+ cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_INDEX_BUFFER)) {
struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
uint32_t offset = cmd_buffer->state.gen7.index_offset;
@@ -255,7 +255,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
}
}
- cmd_buffer->state.dirty = 0;
+ cmd_buffer->state.gfx.dirty = 0;
}
void
diff --git a/src/intel/vulkan/gen8_cmd_buffer.c b/src/intel/vulkan/gen8_cmd_buffer.c
index 002e50c..b0a44ef 100644
--- a/src/intel/vulkan/gen8_cmd_buffer.c
+++ b/src/intel/vulkan/gen8_cmd_buffer.c
@@ -383,8 +383,8 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
uint32_t sf_dw[GENX(3DSTATE_SF_length)];
struct GENX(3DSTATE_SF) sf = {
GENX(3DSTATE_SF_header),
@@ -402,8 +402,8 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen8.sf);
}
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)){
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)){
uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
struct GENX(3DSTATE_RASTER) raster = {
GENX(3DSTATE_RASTER_header),
@@ -422,8 +422,8 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
* using a big old #if switch here.
*/
#if GEN_GEN == 8
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
struct anv_state cc_state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
@@ -447,10 +447,10 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
}
}
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_RENDER_TARGETS |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_RENDER_TARGETS |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
@@ -477,7 +477,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
want_depth_pma_fix(cmd_buffer));
}
#else
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
struct anv_state cc_state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
GENX(COLOR_CALC_STATE_length) * 4,
@@ -498,11 +498,11 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
}
}
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_RENDER_TARGETS |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
- ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_RENDER_TARGETS |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
uint32_t dwords[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
@@ -531,15 +531,15 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
}
#endif
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
- ANV_CMD_DIRTY_INDEX_BUFFER)) {
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_INDEX_BUFFER)) {
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
vf.CutIndex = cmd_buffer->state.restart_index;
}
}
- cmd_buffer->state.dirty = 0;
+ cmd_buffer->state.gfx.dirty = 0;
}
void genX(CmdBindIndexBuffer)(
@@ -571,7 +571,7 @@ void genX(CmdBindIndexBuffer)(
ib.BufferSize = buffer->size - offset;
}
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
/* Set of stage bits for which are pipelined, i.e. they get queued by the
diff --git a/src/intel/vulkan/genX_blorp_exec.c b/src/intel/vulkan/genX_blorp_exec.c
index e849a3b..871ade8 100644
--- a/src/intel/vulkan/genX_blorp_exec.c
+++ b/src/intel/vulkan/genX_blorp_exec.c
@@ -219,6 +219,6 @@ genX(blorp_exec)(struct blorp_batch *batch,
blorp_exec(batch, params);
cmd_buffer->state.vb_dirty = ~0;
- cmd_buffer->state.dirty = ~0;
+ cmd_buffer->state.gfx.dirty = ~0;
cmd_buffer->state.push_constants_dirty = ~0;
}
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index 75aa40a..491ec7b 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -982,7 +982,7 @@ genX(BeginCommandBuffer)(
result = genX(cmd_buffer_setup_attachments)(cmd_buffer,
cmd_buffer->state.pass, NULL);
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
}
return result;
@@ -2035,7 +2035,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
cmd_buffer->state.vb_dirty &= ~vb_emit;
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) {
anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
/* The exact descriptor layout is pulled from the pipeline, so we need
@@ -2072,7 +2072,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
#endif
/* Render targets live in the same binding table as fragment descriptors */
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
/* We emit the binding tables and sampler tables first, then emit push
@@ -2098,16 +2098,16 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
if (dirty)
cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
gen8_cmd_buffer_emit_viewport(cmd_buffer);
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
ANV_CMD_DIRTY_PIPELINE)) {
gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
pipeline->depth_clamp_enable);
}
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
gen7_cmd_buffer_emit_scissor(cmd_buffer);
genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
@@ -2512,7 +2512,7 @@ genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
genX(flush_pipeline_select_gpgpu)(cmd_buffer);
- if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
+ if (cmd_buffer->state.compute.pipeline_dirty) {
/* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
*
* "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
@@ -2528,7 +2528,7 @@ genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
}
if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
- (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
+ cmd_buffer->state.compute.pipeline_dirty) {
/* FIXME: figure out descriptors for gen7 */
result = flush_compute_descriptor_set(cmd_buffer);
if (result != VK_SUCCESS)
@@ -2549,7 +2549,7 @@ genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
}
}
- cmd_buffer->state.compute_dirty = 0;
+ cmd_buffer->state.compute.pipeline_dirty = false;
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
@@ -3090,7 +3090,7 @@ genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
{
cmd_buffer->state.subpass = subpass;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
/* Our implementation of VK_KHR_multiview uses instancing to draw the
* different views. If the client asks for instancing, we need to use the
diff --git a/src/intel/vulkan/genX_gpu_memcpy.c b/src/intel/vulkan/genX_gpu_memcpy.c
index 5b00b31..f3ada93 100644
--- a/src/intel/vulkan/genX_gpu_memcpy.c
+++ b/src/intel/vulkan/genX_gpu_memcpy.c
@@ -272,5 +272,5 @@ genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
prim.BaseVertexLocation = 0;
}
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
}
--
2.5.0.400.gff86faf
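
[Editor's note, not part of the patch] For readers skimming the archive, a rough sketch of how the state structs in anv_private.h end up once this lands. Field and struct names are taken from the hunks above; unrelated members are elided, and the anv_cmd_dirty_mask_t typedef is assumed here for completeness:

/* Illustrative only -- not part of the patch. */
typedef uint32_t anv_cmd_dirty_mask_t;        /* assumed typedef for this sketch */

struct anv_cmd_pipeline_state { /* ... */ };  /* shared base, unchanged by this patch */

struct anv_cmd_graphics_state {
   struct anv_cmd_pipeline_state base;

   anv_cmd_dirty_mask_t dirty;                /* was anv_cmd_state::dirty */
};

struct anv_cmd_compute_state {
   struct anv_cmd_pipeline_state base;

   bool pipeline_dirty;                       /* replaces compute_dirty; per the hunks
                                               * above, ANV_CMD_DIRTY_PIPELINE was the
                                               * only compute bit in use */
};

struct anv_cmd_state {
   /* ... */
   struct anv_cmd_graphics_state gfx;
   struct anv_cmd_compute_state  compute;

   uint32_t vb_dirty;
   /* dirty and compute_dirty are gone; callers now use
    * state.gfx.dirty and state.compute.pipeline_dirty instead. */
};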