[Mesa-dev] [PATCH v2 05/13] i965: Allocate URB space for HS and DS stages when required.
Kenneth Graunke
kenneth at whitecape.org
Mon Dec 14 02:44:23 PST 2015
On Saturday, December 12, 2015 10:39:13 AM Jordan Justen wrote:
> On 2015-12-11 13:23:54, Kenneth Graunke wrote:
> > From: Chris Forbes <chrisf at ijw.co.nz>
> >
> > v2: Rewrite the push constant allocation code to be clearer.
> > Only apply the minimum VS entries workaround on Gen 8.
> >
> > Signed-off-by: Chris Forbes <chrisf at ijw.co.nz>
> > Signed-off-by: Kenneth Graunke <kenneth at whitecape.org>
> > ---
> > src/mesa/drivers/dri/i965/brw_context.h | 21 +++-
> > src/mesa/drivers/dri/i965/gen7_blorp.cpp | 8 ++
> > src/mesa/drivers/dri/i965/gen7_urb.c | 174 ++++++++++++++++++++++++-------
> > 3 files changed, 166 insertions(+), 37 deletions(-)
> >
> > diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
> > index 1cc4c7b..69bc04c 100644
> > --- a/src/mesa/drivers/dri/i965/brw_context.h
> > +++ b/src/mesa/drivers/dri/i965/brw_context.h
> > @@ -1008,6 +1008,8 @@ struct brw_context
> > struct {
> > GLuint vsize; /* vertex size plus header in urb registers */
> > GLuint gsize; /* GS output size in urb registers */
> > + GLuint hsize; /* Tessellation control output size in urb registers */
> > + GLuint dsize; /* Tessellation evaluation output size in urb registers */
> > GLuint csize; /* constant buffer size in urb registers */
> > GLuint sfsize; /* setup data size in urb registers */
> >
> > @@ -1020,12 +1022,16 @@ struct brw_context
> > GLuint max_gs_entries; /* Maximum number of GS entries */
> >
> > GLuint nr_vs_entries;
> > + GLuint nr_hs_entries;
> > + GLuint nr_ds_entries;
> > GLuint nr_gs_entries;
> > GLuint nr_clip_entries;
> > GLuint nr_sf_entries;
> > GLuint nr_cs_entries;
> >
> > GLuint vs_start;
> > + GLuint hs_start;
> > + GLuint ds_start;
> > GLuint gs_start;
> > GLuint clip_start;
> > GLuint sf_start;
> > @@ -1042,6 +1048,11 @@ struct brw_context
> > * URB space for the GS.
> > */
> > bool gs_present;
> > +
> > + /* True if the most recently sent _3DSTATE_URB message allocated
> > + * URB space for the HS and DS.
> > + */
> > + bool tess_present;
> > } urb;
> >
> >
> > @@ -1648,12 +1659,18 @@ void gen8_emit_3dstate_sample_pattern(struct brw_context *brw);
> > /* gen7_urb.c */
> > void
> > gen7_emit_push_constant_state(struct brw_context *brw, unsigned vs_size,
> > + unsigned hs_size, unsigned ds_size,
> > unsigned gs_size, unsigned fs_size);
> >
> > void
> > gen7_emit_urb_state(struct brw_context *brw,
> > - unsigned nr_vs_entries, unsigned vs_size,
> > - unsigned vs_start, unsigned nr_gs_entries,
> > + unsigned nr_vs_entries,
> > + unsigned vs_size, unsigned vs_start,
> > + unsigned nr_hs_entries,
> > + unsigned hs_size, unsigned hs_start,
> > + unsigned nr_ds_entries,
> > + unsigned ds_size, unsigned ds_start,
> > + unsigned nr_gs_entries,
> > unsigned gs_size, unsigned gs_start);
> >
> >
> > diff --git a/src/mesa/drivers/dri/i965/gen7_blorp.cpp b/src/mesa/drivers/dri/i965/gen7_blorp.cpp
> > index e87b9d1..89b73ca 100644
> > --- a/src/mesa/drivers/dri/i965/gen7_blorp.cpp
> > +++ b/src/mesa/drivers/dri/i965/gen7_blorp.cpp
> > @@ -50,6 +50,8 @@ gen7_blorp_emit_urb_config(struct brw_context *brw)
> > unsigned urb_size = (brw->is_haswell && brw->gt == 3) ? 32 : 16;
> > gen7_emit_push_constant_state(brw,
> > urb_size / 2 /* vs_size */,
> > + 0 /* hs_size */,
> > + 0 /* ds_size */,
> > 0 /* gs_size */,
> > urb_size / 2 /* fs_size */);
> >
> > @@ -60,6 +62,12 @@ gen7_blorp_emit_urb_config(struct brw_context *brw)
> > 32 /* num_vs_entries */,
> > 2 /* vs_size */,
> > 2 /* vs_start */,
> > + 0 /* num_hs_entries */,
> > + 1 /* hs_size */,
> > + 2 /* hs_start */,
> > + 0 /* num_ds_entries */,
> > + 1 /* ds_size */,
> > + 2 /* ds_start */,
> > 0 /* num_gs_entries */,
> > 1 /* gs_size */,
> > 2 /* gs_start */);
> > diff --git a/src/mesa/drivers/dri/i965/gen7_urb.c b/src/mesa/drivers/dri/i965/gen7_urb.c
> > index 99a9d3c..a598c89 100644
> > --- a/src/mesa/drivers/dri/i965/gen7_urb.c
> > +++ b/src/mesa/drivers/dri/i965/gen7_urb.c
> > @@ -34,7 +34,7 @@
> > * __________-__________ _________________-_________________
> > * / \ / \
> > * +-------------------------------------------------------------+
> > - * | VS/FS/GS Push | VS/GS URB |
> > + * | VS/HS/DS/GS/FS Push | VS/HS/DS/GS URB |
> > * | Constants | Entries |
> > * +-------------------------------------------------------------+
> > *
> > @@ -60,28 +60,28 @@
> > static void
> > gen7_allocate_push_constants(struct brw_context *brw)
> > {
> > + /* BRW_NEW_GEOMETRY_PROGRAM */
> > + bool gs_present = brw->geometry_program;
> > +
> > + /* BRW_NEW_TESS_CTRL_PROGRAM, BRW_NEW_TESS_EVAL_PROGRAM */
> > + bool tess_present = brw->tess_eval_program;
> > +
> > unsigned avail_size = 16;
> > unsigned multiplier =
> > (brw->gen >= 8 || (brw->is_haswell && brw->gt == 3)) ? 2 : 1;
> >
> > - /* BRW_NEW_GEOMETRY_PROGRAM */
> > - bool gs_present = brw->geometry_program;
> > + int stages = 2 + gs_present + tess_present;
> >
> > - unsigned vs_size, gs_size;
> > - if (gs_present) {
> > - vs_size = avail_size / 3;
> > - avail_size -= vs_size;
> > - gs_size = avail_size / 2;
> > - avail_size -= gs_size;
> > - } else {
> > - vs_size = avail_size / 2;
> > - avail_size -= vs_size;
> > - gs_size = 0;
> > - }
> > - unsigned fs_size = avail_size;
> > + unsigned size_per_stage = multiplier * (avail_size / stages);
>
> Would it be safe to do the multiplication before the division?
Technically, it would - the numbers work out:
$ python
>>> [(16 // i) * 2 for i in [2, 3, 4, 5]]
[16, 10, 8, 6]
>>> [(32 // i) for i in [2, 3, 4, 5]]
[16, 10, 8, 6]
The point of the multiplier is that HSW GT3 and BDW+ have 32kB of push
constant space (rather than 16kB), but the allocations have to be
multiples of 2kB instead of multiples of 1kB.
It wasn't obvious to me that dividing 32kB by the number of active
shader stages would produce a multiple of 2. I'd like to avoid making
that change in this patch, though, as what I've done here matches our
current behavior.
However, I just realized this isn't equivalent to the old code, and
isn't optimal: with 3 shaders (VS, GS, FS), we used to break up the
16kB as <VS=5, GS=5, PS=6>. With all five stages active, Chris's
original cascading algorithm would do: <VS=3, HS=3, DS=3, GS=3, PS=4>. My simplified algorithm
fails to allocate the bonus space resulting from floor-division to
the PS stage.
I'll fix that...
> > +
> > + unsigned vs_size = size_per_stage;
> > + unsigned hs_size = tess_present ? size_per_stage : 0;
> > + unsigned ds_size = tess_present ? size_per_stage : 0;
> > + unsigned gs_size = gs_present ? size_per_stage : 0;
> > + unsigned fs_size = size_per_stage;
> >
> > - gen7_emit_push_constant_state(brw, multiplier * vs_size,
> > - multiplier * gs_size, multiplier * fs_size);
> > + gen7_emit_push_constant_state(brw, vs_size, hs_size, ds_size, gs_size,
> > + fs_size);
> >
> > /* From p115 of the Ivy Bridge PRM (3.2.1.4 3DSTATE_PUSH_CONSTANT_ALLOC_VS):
> > *
> > @@ -99,15 +99,24 @@ gen7_allocate_push_constants(struct brw_context *brw)
> >
> > void
> > gen7_emit_push_constant_state(struct brw_context *brw, unsigned vs_size,
> > + unsigned hs_size, unsigned ds_size,
> > unsigned gs_size, unsigned fs_size)
> > {
> > unsigned offset = 0;
> >
> > - BEGIN_BATCH(6);
> > + BEGIN_BATCH(10);
> > OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_VS << 16 | (2 - 2));
> > OUT_BATCH(vs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
> > offset += vs_size;
> >
> > + OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_HS << 16 | (2 - 2));
> > + OUT_BATCH(hs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
> > + offset += hs_size;
> > +
> > + OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_DS << 16 | (2 - 2));
> > + OUT_BATCH(ds_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
> > + offset += ds_size;
> > +
> > OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_GS << 16 | (2 - 2));
> > OUT_BATCH(gs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
> > offset += gs_size;
> > @@ -130,7 +139,10 @@ gen7_emit_push_constant_state(struct brw_context *brw, unsigned vs_size,
> > const struct brw_tracked_state gen7_push_constant_space = {
> > .dirty = {
> > .mesa = 0,
> > - .brw = BRW_NEW_CONTEXT | BRW_NEW_GEOMETRY_PROGRAM,
> > + .brw = BRW_NEW_CONTEXT |
> > + BRW_NEW_GEOMETRY_PROGRAM |
> > + BRW_NEW_TESS_CTRL_PROGRAM |
> > + BRW_NEW_TESS_EVAL_PROGRAM,
> > },
> > .emit = gen7_allocate_push_constants,
> > };
> > @@ -138,6 +150,7 @@ const struct brw_tracked_state gen7_push_constant_space = {
> > static void
> > gen7_upload_urb(struct brw_context *brw)
> > {
> > + const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
> > const int push_size_kB =
> > (brw->gen >= 8 || (brw->is_haswell && brw->gt == 3)) ? 32 : 16;
> >
> > @@ -149,6 +162,16 @@ gen7_upload_urb(struct brw_context *brw)
> > unsigned gs_size = gs_present ? brw->gs.prog_data->base.urb_entry_size : 1;
> > unsigned gs_entry_size_bytes = gs_size * 64;
> >
> > + /* BRW_NEW_TESS_CTRL_PROGRAM, BRW_NEW_TCS_PROG_DATA */
> > + /* BRW_NEW_TESS_EVAL_PROGRAM, BRW_NEW_TES_PROG_DATA */
>
> Maybe merge these 2 comments.
>
> > + const bool tess_present = brw->tess_eval_program;
> > + if (tess_present)
> > + assert(brw->tess_ctrl_program);
>
> I remember also seeing this as:
>
> assert(!tess_present || brw->tess_ctrl_program);
>
> which I slightly preferred.
Yep, I agree. Changed.
> > + unsigned hs_size = tess_present ? brw->tcs.prog_data->base.urb_entry_size : 1;
> > + unsigned hs_entry_size_bytes = hs_size * 64;
> > + unsigned ds_size = tess_present ? brw->tes.prog_data->base.urb_entry_size : 1;
> > + unsigned ds_entry_size_bytes = ds_size * 64;
> > +
> > /* If we're just switching between programs with the same URB requirements,
> > * skip the rest of the logic.
> > */
> > @@ -156,21 +179,29 @@ gen7_upload_urb(struct brw_context *brw)
> > !(brw->ctx.NewDriverState & BRW_NEW_URB_SIZE) &&
> > brw->urb.vsize == vs_size &&
> > brw->urb.gs_present == gs_present &&
> > - brw->urb.gsize == gs_size) {
> > + brw->urb.gsize == gs_size &&
> > + brw->urb.tess_present == tess_present &&
> > + brw->urb.hsize == hs_size &&
> > + brw->urb.dsize == ds_size) {
> > return;
> > }
> > brw->urb.vsize = vs_size;
> > brw->urb.gs_present = gs_present;
> > brw->urb.gsize = gs_size;
> > + brw->urb.tess_present = tess_present;
> > + brw->urb.hsize = hs_size;
> > + brw->urb.dsize = ds_size;
> >
> > /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
> > *
> > * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
> > * Allocation Size is less than 9 512-bit URB entries.
> > *
> > - * Similar text exists for GS.
> > + * Similar text exists for HS, DS and GS.
> > */
> > unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
> > + unsigned hs_granularity = (hs_size < 9) ? 8 : 1;
> > + unsigned ds_granularity = (ds_size < 9) ? 8 : 1;
> > unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
> >
> > /* URB allocations must be done in 8k chunks. */
> > @@ -191,9 +222,17 @@ gen7_upload_urb(struct brw_context *brw)
> > * additional space it could actually make use of).
> > */
> >
> > - /* VS has a lower limit on the number of URB entries */
> > + /* VS has a lower limit on the number of URB entries.
> > + *
> > + * From the Broadwell PRM, 3DSTATE_URB_VS instruction:
> > + * "When tessellation is enabled, the VS Number of URB Entries must be
> > + * greater than or equal to 192."
> > + */
> > + unsigned vs_min_entries =
> > + tess_present && brw->gen == 8 ? 192 : brw->urb.min_vs_entries;
> > +
> > unsigned vs_chunks =
> > - ALIGN(brw->urb.min_vs_entries * vs_entry_size_bytes, chunk_size_bytes) /
> > + ALIGN(vs_min_entries * vs_entry_size_bytes, chunk_size_bytes) /
> > chunk_size_bytes;
> > unsigned vs_wants =
> > ALIGN(brw->urb.max_vs_entries * vs_entry_size_bytes,
> > @@ -217,14 +256,36 @@ gen7_upload_urb(struct brw_context *brw)
> > chunk_size_bytes) / chunk_size_bytes - gs_chunks;
> > }
> >
> > + unsigned hs_chunks = 0;
> > + unsigned hs_wants = 0;
> > + unsigned ds_chunks = 0;
> > + unsigned ds_wants = 0;
> > +
> > + if (tess_present) {
> > + hs_chunks =
> > + ALIGN(hs_granularity * hs_entry_size_bytes, chunk_size_bytes) /
> > + chunk_size_bytes;
> > + hs_wants =
> > + ALIGN(brw->urb.max_hs_entries * hs_entry_size_bytes,
> > + chunk_size_bytes) / chunk_size_bytes - hs_chunks;
> > +
> > + ds_chunks =
> > + ALIGN(devinfo->urb.min_ds_entries * ds_entry_size_bytes, chunk_size_bytes) /
> > + chunk_size_bytes;
> > + ds_wants =
> > + ALIGN(brw->urb.max_ds_entries * ds_entry_size_bytes,
> > + chunk_size_bytes) / chunk_size_bytes - ds_chunks;
> > + }
> > +
> > /* There should always be enough URB space to satisfy the minimum
> > * requirements of each stage.
> > */
> > - unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
> > + unsigned total_needs = push_constant_chunks +
> > + vs_chunks + hs_chunks + ds_chunks + gs_chunks;
> > assert(total_needs <= urb_chunks);
> >
> > /* Mete out remaining space (if any) in proportion to "wants". */
> > - unsigned total_wants = vs_wants + gs_wants;
> > + unsigned total_wants = vs_wants + hs_wants + ds_wants + gs_wants;
> > unsigned remaining_space = urb_chunks - total_needs;
> > if (remaining_space > total_wants)
> > remaining_space = total_wants;
> > @@ -233,61 +294,99 @@ gen7_upload_urb(struct brw_context *brw)
> > roundf(vs_wants * (((float) remaining_space) / total_wants));
> > vs_chunks += vs_additional;
> > remaining_space -= vs_additional;
> > + total_wants -= vs_wants;
> > +
> > + unsigned hs_additional = (unsigned)
> > + round(hs_wants * (((double) remaining_space) / total_wants));
> > + hs_chunks += hs_additional;
> > + remaining_space -= hs_additional;
> > + total_wants -= hs_wants;
> > +
> > + unsigned ds_additional = (unsigned)
> > + round(ds_wants * (((double) remaining_space) / total_wants));
> > + ds_chunks += ds_additional;
> > + remaining_space -= ds_additional;
> > + total_wants -= ds_wants;
> > +
> > gs_chunks += remaining_space;
> > }
> >
> > /* Sanity check that we haven't over-allocated. */
> > - assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
> > + assert(push_constant_chunks +
> > + vs_chunks + hs_chunks + ds_chunks + gs_chunks <= urb_chunks);
> >
> > /* Finally, compute the number of entries that can fit in the space
> > * allocated to each stage.
> > */
> > unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
> > + unsigned nr_hs_entries = hs_chunks * chunk_size_bytes / hs_entry_size_bytes;
> > + unsigned nr_ds_entries = ds_chunks * chunk_size_bytes / ds_entry_size_bytes;
> > unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
> >
> > /* Since we rounded up when computing *_wants, this may be slightly more
> > * than the maximum allowed amount, so correct for that.
> > */
> > nr_vs_entries = MIN2(nr_vs_entries, brw->urb.max_vs_entries);
> > + nr_hs_entries = MIN2(nr_hs_entries, brw->urb.max_hs_entries);
> > + nr_ds_entries = MIN2(nr_ds_entries, brw->urb.max_ds_entries);
> > nr_gs_entries = MIN2(nr_gs_entries, brw->urb.max_gs_entries);
> >
> > /* Ensure that we program a multiple of the granularity. */
> > nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
> > + nr_hs_entries = ROUND_DOWN_TO(nr_hs_entries, hs_granularity);
> > + nr_ds_entries = ROUND_DOWN_TO(nr_ds_entries, ds_granularity);
> > nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
> >
> > /* Finally, sanity check to make sure we have at least the minimum number
> > * of entries needed for each stage.
> > */
> > - assert(nr_vs_entries >= brw->urb.min_vs_entries);
> > + assert(nr_vs_entries >= vs_min_entries);
> > if (gs_present)
> > assert(nr_gs_entries >= 2);
> > + if (tess_present) {
> > + assert(nr_hs_entries >= 1);
> > + assert(nr_ds_entries >= devinfo->urb.min_ds_entries);
> > + }
> >
> > /* Gen7 doesn't actually use brw->urb.nr_{vs,gs}_entries, but it seems
> > * better to put reasonable data in there rather than leave them
> > * uninitialized.
> > */
> > brw->urb.nr_vs_entries = nr_vs_entries;
> > + brw->urb.nr_hs_entries = nr_hs_entries;
> > + brw->urb.nr_ds_entries = nr_ds_entries;
> > brw->urb.nr_gs_entries = nr_gs_entries;
> >
> > /* Lay out the URB in the following order:
> > * - push constants
> > * - VS
> > + * - HS
> > + * - DS
> > * - GS
> > */
> > brw->urb.vs_start = push_constant_chunks;
> > - brw->urb.gs_start = push_constant_chunks + vs_chunks;
> > + brw->urb.hs_start = push_constant_chunks + vs_chunks;
> > + brw->urb.ds_start = push_constant_chunks + vs_chunks + hs_chunks;
> > + brw->urb.gs_start = push_constant_chunks + vs_chunks + hs_chunks + ds_chunks;
>
> Don't we want < 80 column lines?
>
> Reviewed-by: Jordan Justen <jordan.l.justen at intel.com>
We do. My terminal emulator has been broken for most of this year,
so I've been screwing this up more often lately:
https://bugs.kde.org/show_bug.cgi?id=345403
I'll try to fix those mistakes.
-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 819 bytes
Desc: This is a digitally signed message part.
URL: <http://lists.freedesktop.org/archives/mesa-dev/attachments/20151214/5d1776cf/attachment.sig>
More information about the mesa-dev
mailing list