<div dir="ltr"><div class="gmail_extra"><div class="gmail_quote">On Mon, Mar 27, 2017 at 5:33 AM, Iago Toral <span dir="ltr"><<a href="mailto:itoral@igalia.com" target="_blank">itoral@igalia.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"><div><div><div class="h5"><div>On Fri, 2017-03-24 at 16:57 -0700, Jason Ekstrand wrote:</div><blockquote type="cite"><div dir="ltr"><div class="gmail_extra"><div class="gmail_quote">On Fri, Mar 24, 2017 at 5:53 AM, Iago Toral <span dir="ltr"><<a href="mailto:itoral@igalia.com" target="_blank">itoral@igalia.com</a>></span> wrote:<br><blockquote type="cite"><div class="m_6150988601065960206HOEnZb"><div class="m_6150988601065960206h5">On Wed, 2017-03-22 at 21:01 -0700, Jason Ekstrand wrote:<br>
---<br>
src/intel/vulkan/anv_private.<wbr>h | 6 ++<br>
src/intel/vulkan/genX_cmd_buf<wbr>fer.c | 141<br>
++++++++++++++++++++++++++++++<wbr>++++++-<br>
src/intel/vulkan/genX_pipelin<wbr>e.c | 10 ++-<br>
3 files changed, 152 insertions(+), 5 deletions(-)<br>
<br>
<br>
diff --git a/src/intel/vulkan/anv_private<wbr>.h<br>
b/src/intel/vulkan/anv_private<wbr>.h<br>
index b402bc3..253dce2 100644<br>
--- a/src/intel/vulkan/anv_private<wbr>.h<br>
+++ b/src/intel/vulkan/anv_private<wbr>.h<br>
@@ -1980,6 +1980,12 @@ struct anv_subpass {<br>
bool <wbr> has_resolve<wbr>;<br>
};<br>
<br>
+static inline unsigned<br>
+anv_subpass_view_count(const struct anv_subpass *subpass)<br>
+{<br>
+ return MAX2(1, _mesa_bitcount(subpass->view_m<wbr>ask));<br>
+}<br>
+<br>
enum anv_subpass_usage {<br>
ANV_SUBPASS_USAGE_DRAW = (1 << 0),<br>
ANV_SUBPASS_USAGE_INPUT = (1 << 1),<br>
diff --git a/src/intel/vulkan/genX_cmd_bu<wbr>ffer.c<br>
b/src/intel/vulkan/genX_cmd_bu<wbr>ffer.c<br>
index aafe7fd..8c21c33 100644<br>
--- a/src/intel/vulkan/genX_cmd_bu<wbr>ffer.c<br>
+++ b/src/intel/vulkan/genX_cmd_bu<wbr>ffer.c<br>
@@ -26,6 +26,7 @@<br>
<br>
#include "anv_private.h"<br>
#include "vk_format_info.h"<br>
+#include "util/vk_util.h"<br>
<br>
#include "common/gen_l3_config.h"<br>
#include "genxml/gen_macros.h"<br>
@@ -50,6 +51,17 @@ emit_lri(struct anv_batch *batch, uint32_t reg,<br>
uint32_t imm)<br>
}<br>
}<br>
<br>
+#if GEN_IS_HASWELL || GEN_GEN >= 8<br>
+static void<br>
+emit_lrr(struct anv_batch *batch, uint32_t dst, uint32_t src)<br>
+{<br>
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {<br>
+ lrr.SourceRegisterAddre<wbr>ss = src;<br>
+ lrr.DestinationRegister<wbr>Address = dst;<br>
+ }<br>
+}<br>
+#endif<br>
+<br>
void<br>
genX(cmd_buffer_emit_state_ba<wbr>se_address)(struct anv_cmd_buffer<br>
*cmd_buffer)<br>
{<br>
@@ -1525,7 +1537,13 @@ genX(cmd_buffer_flush_state)(s<wbr>truct<br>
anv_cmd_buffer *cmd_buffer)<br>
.MemoryObjectCont<wbr>rolState = GENX(MOCS),<br>
#else<br>
.BufferAccessType = pipeline->instancing_enable[vb<wbr>] ?<br>
INSTANCEDATA : VERTEXDATA,<br>
- .InstanceDataStep<wbr>Rate = 1,<br>
+ /* Our implementation of VK_KHR_multiview uses<br>
instancing to draw<br>
+ * the different views. If the client asks for<br>
instancing, we<br>
+ * need to use the Instance Data Step Rate to ensure<br>
that we<br>
+ * repeat the client's per-instance data once for each<br>
view.<br>
+ */<br>
+ .InstanceDataStep<wbr>Rate =<br>
+ _mesa_bitcount<wbr>(pipeline->subpass->view_mask)<wbr>,<br>
<br>
</div></div>mmm... shouldn't this be:<br>
<br>
.InstanceDataStepRate = anv_subpass_view_count(pipelin<wbr>e->subpass);<br>
<br>
so that we set this to 1 when multiview is not in use? (view_mask == 0)<br></blockquote><div><br></div><div>Good call! You're absolutely right. This line is older than anv_subpass_view_count, I think.<br></div><div> </div><blockquote type="cite">
<div><div class="m_6150988601065960206h5"><br>
.VertexBufferMemo<wbr>ryObjectControlState = GENX(MOCS),<br>
#endif<br>
<br>
@@ -1715,6 +1733,11 @@ void genX(CmdDraw)(<br>
if (vs_prog_data->uses_drawid)<br>
emit_draw_index(cmd_buf<wbr>fer, 0);<br>
<br>
+ /* Our implementation of VK_KHR_multiview uses instancing to draw<br>
the<br>
+ * different views. We need to multiply instanceCount by the<br>
view count.<br>
+ */<br>
+ instanceCount *= anv_subpass_view_count(cmd_buf<wbr>fer-<br>
>state.subpass);<br>
+<br>
anv_batch_emit(&cmd_buffer<wbr>->batch, GENX(3DPRIMITIVE), prim) {<br>
prim.VertexAccessType <wbr> = SEQUENTIAL;<br>
prim.PrimitiveTopologyT<wbr>ype = pipeline->topology;<br>
@@ -1748,6 +1771,11 @@ void genX(CmdDrawIndexed)(<br>
if (vs_prog_data->uses_drawid)<br>
emit_draw_index(cmd_buf<wbr>fer, 0);<br>
<br>
+ /* Our implementation of VK_KHR_multiview uses instancing to draw<br>
the<br>
+ * different views. We need to multiply instanceCount by the<br>
view count.<br>
+ */<br>
+ instanceCount *= anv_subpass_view_count(cmd_buf<wbr>fer-<br>
>state.subpass);<br>
+<br>
anv_batch_emit(&cmd_buffer<wbr>->batch, GENX(3DPRIMITIVE), prim) {<br>
prim.VertexAccessType <wbr> = RANDOM;<br>
prim.PrimitiveTopologyT<wbr>ype = pipeline->topology;<br>
@@ -1767,6 +1795,90 @@ void genX(CmdDrawIndexed)(<br>
#define GEN7_3DPRIM_START_INSTANCE <wbr> 0x243C<br>
#define GEN7_3DPRIM_BASE_VERTEX <wbr> 0x2440<br>
<br>
+/* MI_MATH only exists on Haswell+ */<br>
+#if GEN_IS_HASWELL || GEN_GEN >= 8<br>
+<br>
+#define alu_opcode(v) __gen_uint((v)<wbr>, 20, 31)<br>
+#define alu_operand1(v) __gen_uint((v), 10, 19)<br>
+#define alu_operand2(v) __gen_uint((v), 0, 9)<br>
+#define alu(opcode, operand1, operand2) \<br>
+ alu_opcode(opcode) | alu_operand1(operand1) |<br>
alu_operand2(operand2)<br>
+<br>
+#define OPCODE_NOOP 0x000<br>
+#define OPCODE_LOAD 0x080<br>
+#define OPCODE_LOADINV 0x480<br>
+#define OPCODE_LOAD0 0x081<br>
+#define OPCODE_LOAD1 0x481<br>
+#define OPCODE_ADD 0x100<br>
+#define OPCODE_SUB 0x101<br>
+#define OPCODE_AND 0x102<br>
+#define OPCODE_OR 0x103<br>
+#define OPCODE_XOR 0x104<br>
+#define OPCODE_STORE 0x180<br>
+#define OPCODE_STOREINV 0x580<br>
+<br>
+#define OPERAND_R0 0x00<br>
+#define OPERAND_R1 0x01<br>
+#define OPERAND_R2 0x02<br>
+#define OPERAND_R3 0x03<br>
+#define OPERAND_R4 0x04<br>
+#define OPERAND_SRCA 0x20<br>
+#define OPERAND_SRCB 0x21<br>
+#define OPERAND_ACCU 0x31<br>
+#define OPERAND_ZF 0x32<br>
+#define OPERAND_CF 0x33<br>
+<br>
+#define CS_GPR(n) (0x2600 + (n) * 8)<br>
+<br>
+/* Emit dwords to multiply GPR0 by N */<br>
+static void<br>
+build_alu_multiply_gpr0(uint3<wbr>2_t *dw, unsigned *dw_count, uint32_t<br>
N)<br>
+{<br>
+ VK_OUTARRAY_MAKE(out, dw, dw_count);<br>
+<br>
+#define append_alu(opcode, operand1, operand2) \<br>
+ vk_outarray_append(&out, alu_dw) *alu_dw = alu(opcode, operand1,<br>
operand2)<br>
+<br>
+ assert(N > 0);<br>
+ unsigned top_bit = 31 - __builtin_clz(N);<br>
+ for (int i = top_bit - 1; i >= 0; i--) {<br>
+ /* We get our initial data in GPR0 and we write the final data<br>
out to<br>
+ * GPR0 but we use GPR1 as our scratch register.<br>
+ */<br>
+ unsigned src_reg = i == top_bit - 1 ? OPERAND_R0 : OPERAND_R1;<br>
+ unsigned dst_reg = i == 0 ? OPERAND_R0 : OPERAND_R1;<br>
+<br>
+ /* Shift the current value left by 1 */<br>
+ append_alu(OPCODE_LOAD, OPERAND_SRCA, src_reg);<br>
+ append_alu(OPCODE_LOAD, OPERAND_SRCB, src_reg);<br>
+ append_alu(OPCODE_AND, 0, 0);<br>
+<br>
+ if (N & (1 << i)) {<br>
+ /* Store ACCU to R1 and add R0 to R1 */<br>
+ append_alu(OPCODE_ST<wbr>ORE, OPERAND_R1, OPERAND_ACCU);<br>
+ append_alu(OPCODE_LO<wbr>AD, OPERAND_SRCA, OPERAND_R0);<br>
+ append_alu(OPCODE_LO<wbr>AD, OPERAND_SRCB, OPERAND_R1);<br>
+ append_alu(OPCODE_AN<wbr>D, 0, 0);<br>
+ }<br>
+<br>
+ append_alu(OPCODE_STORE<wbr>, dst_reg, OPERAND_ACCU);<br>
+ }<br>
+<br>
+#undef append_alu<br>
+}<br>
+<br>
+static void<br>
+emit_mul_gpr0(struct anv_batch *batch, uint32_t N)<br>
+{<br>
+ uint32_t num_dwords;<br>
> + build_alu_multiply_gpr0(NU<wbr>LL, &num_dwords, N);<br>
<br>
</div></div>I don't get this, it seems that the expectation of this code is<br>
that build_alu_multiply_gpr0() fills in num_dwords when we call it<br>
with NULL in the first argument, but I don't see where it does that,<br>
what am I missing?<br></blockquote><div><br></div><div>build_alu_multiply_gpr0 has the same semantics as the Vulkan query functions that return arrays. If you call it with NULL, it returns the number of dwords required. Otherwise, it assumes num_dwords is the number of dwords that it has available to write. I made use of vk_outarray to get this behavior. Yeah, it's a bit deceptive, but it worked out really well. :-) Feel free to tell me that it's a bit too clever and that I should make it simpler.<br></div></div></div></div></blockquote><div><br></div></div></div><div>No, it is okay, I understood what the intended behavior was but somehow I didn't see clearly how the macros in vk-util achieved that and got confused. I looked at it more carefully now and I think it is clear and makes the implementation easier too.</div><div><div class="h5"><div><br></div><blockquote type="cite"><div dir="ltr"><div class="gmail_extra"><div class="gmail_quote"><blockquote type="cite"><div><div class="m_6150988601065960206h5">
+ <br>
+ uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords,<br>
GENX(MI_MATH));<br>
+ build_alu_multiply_gpr0(dw<wbr>, &num_dwords, N);<br>
+}<br>
+<br>
+#endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */<br>
+<br>
static void<br>
load_indirect_parameters(stru<wbr>ct anv_cmd_buffer *cmd_buffer,<br>
stru<wbr>ct anv_buffer *buffer, uint64_t offset,<br>
@@ -1777,7 +1889,22 @@ load_indirect_parameters(struc<wbr>t anv_cmd_buffer<br>
*cmd_buffer,<br>
uint32_t bo_offset = buffer->offset + offset;<br>
<br>
emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);<br>
- emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);<br>
+<br>
+ unsigned view_count = anv_subpass_view_count(cmd_buf<wbr>fer-<br>
>state.subpass);<br>
+ if (view_count > 1) {<br>
+#if GEN_IS_HASWELL || GEN_GEN >= 8<br>
+ emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);<br>
+ emit_mul_gpr0(batch, view_count);<br>
+ emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));<br>
+#else<br>
+ anv_finishme("Multiview + indirect draw requires MI_MATH\n"<br>
+ "MI_MATH is not supported on Ivy Bridge");<br>
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset +<br>
4);<br>
+#endif<br>
+ } else {<br>
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset +<br>
4);<br>
+ }<br>
+<br>
emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);<br>
<br>
if (indexed) {<br>
@@ -2531,6 +2658,16 @@ genX(cmd_buffer_set_subpass)(s<wbr>truct<br>
anv_cmd_buffer *cmd_buffer,<br>
<br>
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;<br>
<br>
+ /* Our implementation of VK_KHR_multiview uses instancing to draw<br>
the<br>
+ * different views. If the client asks for instancing, we need<br>
to use the<br>
+ * Instance Data Step Rate to ensure that we repeat the client's<br>
+ * per-instance data once for each view. Since this bit is in<br>
+ * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers<br>
at the top<br>
+ * of each subpass.<br>
+ */<br>
+ if (GEN_GEN == 7)<br>
+ cmd_buffer->state.vb_di<wbr>rty |= ~0;<br>
> +<br>
<br>
</div></div>Shouldn't we do:<br>
<br>
if (GEN_GEN == 7 && subpass->view_mask != 0)<br>
cmd_buffer->state.vb_dirty |= ~0;<br>
<br>
So this doesn't affect cases that don't use multiview? <br></blockquote><div><br></div>Yes, we could do that because we're guaranteed that, for each render pass, either all subpass view masks are 0 or all are non-zero. This seems safer though and I doubt flushing vertex buffers at the subpass boundary will hurt anyone.<br></div></div></div></blockquote><div><br></div></div></div><div>Right, makes sense.</div><div><br></div><div>Reviewed-by: Iago Toral Quiroga <<a>itoral@igalia.com></a></div></div></blockquote><div><br></div><div>Thanks!<br></div><div> </div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"><div><div><div class="h5"><blockquote type="cite"><div dir="ltr"><div class="gmail_extra"><div class="gmail_quote"><blockquote type="cite"><div><div class="m_6150988601065960206h5">
> /* Perform transitions to the subpass layout before any writes<br>
> have<br>
* occurred.<br>
*/<br>
diff --git a/src/intel/vulkan/genX_pipeli<wbr>ne.c<br>
b/src/intel/vulkan/genX_pipeli<wbr>ne.c<br>
index 85a9e4f..9aaa8a0 100644<br>
--- a/src/intel/vulkan/genX_pipeli<wbr>ne.c<br>
+++ b/src/intel/vulkan/genX_pipeli<wbr>ne.c<br>
@@ -155,9 +155,13 @@ emit_vertex_input(struct anv_pipeline *pipeline,<br>
anv_batch_emit(&pipelin<wbr>e->batch, GENX(3DSTATE_VF_INSTANCING),<br>
vfi) {<br>
vfi.InstancingEnable = pipeline->instancing_enable[de<wbr>sc-<br>
>binding];<br>
vfi.VertexElementInd<wbr>ex = slot;<br>
- /* Vulkan so far doesn't have an instance divisor, so<br>
- * this is always 1 (ignored if not instancing). */<br>
- vfi.InstanceDataStep<wbr>Rate = 1;<br>
+ /* Our implementation of VK_KHX_multiview uses instancing<br>
to draw<br>
+ * the different views. If the client asks for instancing,<br>
we<br>
+ * need to use the Instance Data Step Rate to ensure that<br>
we<br>
+ * repeat the client's per-instance data once for each<br>
view.<br>
+ */<br>
+ vfi.InstanceDataStep<wbr>Rate =<br>
> + _mesa_bitcount(pi<wbr>peline->subpass->view_mask);<br>
<br>
</div></div>Same comment as above, shouldn't this be:<br>
<br>
vfi.InstanceDataStepRate = anv_subpass_view_count(pipelin<wbr>e->subpass);<br></blockquote><div><br></div><div class="gmail_quote"><blockquote type="cite"><div><div class="m_6150988601065960206h5">
+ <br>
+ uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords,<br>
GENX(MI_MATH));<br>
+ build_alu_multiply_gpr0(dw<wbr>, &num_dwords, N);<br>
+}<br>
+<br>
+#endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */<br>
+<br>
static void<br>
load_indirect_parameters(stru<wbr>ct anv_cmd_buffer *cmd_buffer,<br>
stru<wbr>ct anv_buffer *buffer, uint64_t offset,<br>
@@ -1777,7 +1889,22 @@ load_indirect_parameters(struc<wbr>t anv_cmd_buffer<br>
*cmd_buffer,<br>
uint32_t bo_offset = buffer->offset + offset;<br>
<br>
emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);<br>
- emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);<br>
+<br>
+ unsigned view_count = anv_subpass_view_count(cmd_buf<wbr>fer-<br>
>state.subpass);<br>
+ if (view_count > 1) {<br>
+#if GEN_IS_HASWELL || GEN_GEN >= 8<br>
+ emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);<br>
+ emit_mul_gpr0(batch, view_count);<br>
+ emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));<br>
+#else<br>
+ anv_finishme("Multiview + indirect draw requires MI_MATH\n"<br>
+ "MI_MATH is not supported on Ivy Bridge");<br>
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset +<br>
4);<br>
+#endif<br>
+ } else {<br>
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset +<br>
4);<br>
+ }<br>
+<br>
emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);<br>
<br>
if (indexed) {<br>
@@ -2531,6 +2658,16 @@ genX(cmd_buffer_set_subpass)(s<wbr>truct<br>
anv_cmd_buffer *cmd_buffer,<br>
<br>
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;<br>
<br>
+ /* Our implementation of VK_KHR_multiview uses instancing to draw<br>
the<br>
+ * different views. If the client asks for instancing, we need<br>
to use the<br>
+ * Instance Data Step Rate to ensure that we repeat the client's<br>
+ * per-instance data once for each view. Since this bit is in<br>
+ * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers<br>
at the top<br>
+ * of each subpass.<br>
+ */<br>
+ if (GEN_GEN == 7)<br>
+ cmd_buffer->state.vb_di<wbr>rty |= ~0;<br>
> +<br>
<br>
</div></div>Shouldn't we do:<br>
<br>
if (GEN_GEN == 7 && subpass->view_mask != 0)<br>
cmd_buffer->state.vb_dirty |= ~0;<br>
<br>
So this doesn't affect cases that don't use multiview? <br></blockquote><div><br></div>Yes, we could do that because we're guaranteed that, for each render pass, either all subpass view masks are 0 or all are non-zero. This seems safer though and I doubt flushing vertex buffers at the subpass boundary will hurt anyone.<br></div><div><br></div></div></div></div>
</blockquote><div><br></div></div></div></div></blockquote></div><br></div></div>