[Mesa-dev] [PATCH v2 5/5] llvmpipe: Optimize lp_rast_triangle_32_3_16 for POWER8

Roland Scheidegger sroland at vmware.com
Tue Jan 5 16:48:14 PST 2016


On 03.01.2016 at 16:17, Oded Gabbay wrote:
> This patch converts the SSE-optimized lp_rast_triangle_32_3_16()
> to VMX/VSX.
> 
> I measured the results on a POWER8 machine with 32 cores at 3.4GHz and
> 16GB of RAM.
> 
>                       FPS/Score
>  Name            Before     After    Delta
> ------------------------------------------------
> openarena        16.35      16.7     2.14%
> xonotic          4.707      4.97     5.57%
> 
> glmark2 didn't show a significant (more than 1%) difference.
> 
> v2: Make sure the code is built only on POWER8 LE machines
> 
> Signed-off-by: Oded Gabbay <oded.gabbay at gmail.com>
> ---
>  src/gallium/drivers/llvmpipe/lp_rast_tri.c | 142 ++++++++++++++++++++++++++++-
>  1 file changed, 141 insertions(+), 1 deletion(-)
> 
> diff --git a/src/gallium/drivers/llvmpipe/lp_rast_tri.c b/src/gallium/drivers/llvmpipe/lp_rast_tri.c
> index 09a182a..232c859 100644
> --- a/src/gallium/drivers/llvmpipe/lp_rast_tri.c
> +++ b/src/gallium/drivers/llvmpipe/lp_rast_tri.c
> @@ -512,7 +512,145 @@ build_mask_linear_32(int c, int dcdx, int dcdy)
>     return vec_movemask_epi8(result);
>  }
>  
> -#endif /* _ARCH_PWR8 && PIPE_ARCH_LITTLE_ENDIAN */
> +static inline __m128i
> +lp_plane_to_m128i(const struct lp_rast_plane *plane)
> +{
> +   return vec_setr_epi32((int32_t)plane->c, (int32_t)plane->dcdx,
> +                         (int32_t)plane->dcdy, (int32_t)plane->eo);
> +}
> +
> +#define NR_PLANES 3
> +
> +void
> +lp_rast_triangle_32_3_16(struct lp_rasterizer_task *task,
> +                         const union lp_rast_cmd_arg arg)
> +{
> +   const struct lp_rast_triangle *tri = arg.triangle.tri;
> +   const struct lp_rast_plane *plane = GET_PLANES(tri);
> +   int x = (arg.triangle.plane_mask & 0xff) + task->x;
> +   int y = (arg.triangle.plane_mask >> 8) + task->y;
> +   unsigned i, j;
> +
> +   struct { unsigned mask:16; unsigned i:8; unsigned j:8; } out[16];
> +   unsigned nr = 0;
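> +   /* out[] collects the 4x4 pixel blocks that are at least partially
> +    * covered; they are shaded in a second pass after the scan.
> +    */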
> +
> +   __m128i p0 = lp_plane_to_m128i(&plane[0]); /* c, dcdx, dcdy, eo */
> +   __m128i p1 = lp_plane_to_m128i(&plane[1]); /* c, dcdx, dcdy, eo */
> +   __m128i p2 = lp_plane_to_m128i(&plane[2]); /* c, dcdx, dcdy, eo */
> +   __m128i zero = vec_splats((unsigned char) 0);
> +
> +   __m128i c;
> +   __m128i dcdx;
> +   __m128i dcdy;
> +   __m128i rej4;
> +
> +   __m128i dcdx2;
> +   __m128i dcdx3;
> +
> +   __m128i span_0;                /* 0,dcdx,2dcdx,3dcdx for plane 0 */
> +   __m128i span_1;                /* 0,dcdx,2dcdx,3dcdx for plane 1 */
> +   __m128i span_2;                /* 0,dcdx,2dcdx,3dcdx for plane 2 */
> +   __m128i unused;
> +
> +   __m128i vshuf_mask0;
> +   __m128i vshuf_mask1;
> +   __m128i vshuf_mask2;
> +
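> +   /* Byte-index masks for vec_perm: each broadcasts one 32-bit lane of
> +    * a vector across all four lanes.  The byte numbering depends on the
> +    * target's endianness, hence the two variants below.
> +    */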
> +#ifdef PIPE_ARCH_LITTLE_ENDIAN
> +   vshuf_mask0 = (__m128i) vec_splats((unsigned int) 0x03020100);
> +   vshuf_mask1 = (__m128i) vec_splats((unsigned int) 0x07060504);
> +   vshuf_mask2 = (__m128i) vec_splats((unsigned int) 0x0B0A0908);
> +#else
> +   vshuf_mask0 = (__m128i) vec_splats((unsigned int) 0x0C0D0E0F);
> +   vshuf_mask1 = (__m128i) vec_splats((unsigned int) 0x08090A0B);
> +   vshuf_mask2 = (__m128i) vec_splats((unsigned int) 0x04050607);
> +#endif
> +
> +   transpose4_epi32(&p0, &p1, &p2, &zero,
> +                    &c, &dcdx, &dcdy, &rej4);
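> +   /* Rows in, columns out: c, dcdx, dcdy and rej4 now each hold one
> +    * element per plane (rej4 gathers the eo values).
> +    */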
> +
> +   /* Negate dcdx: the half-plane value is evaluated below as
> +    * c - dcdx*x + dcdy*y, so negating it once up front lets every
> +    * x step use an add.
> +    */
> +   dcdx = vec_sub_epi32(zero, dcdx);
> +
> +   c = vec_add_epi32(c, vec_mullo_epi32(dcdx, (__m128i) vec_splats(x)));
> +   c = vec_add_epi32(c, vec_mullo_epi32(dcdy, (__m128i) vec_splats(y)));
> +   rej4 = vec_slli_epi32(rej4, 2);
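> +   /* eo is the trivial-reject offset for a single pixel step; shifting
> +    * left by 2 scales it for the 4x4 pixel blocks tested below.
> +    */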
> +
> +   /*
> +    * Adjust so we can just check the sign bit (< 0 comparison),
> +    * instead of having to do a less efficient <= 0 comparison
> +    */
> +   c = vec_sub_epi32(c, (__m128i) vec_splats((unsigned int) 1));
> +   rej4 = vec_add_epi32(rej4, (__m128i) vec_splats((unsigned int) 1));
> +
> +   dcdx2 = vec_add_epi32(dcdx, dcdx);
> +   dcdx3 = vec_add_epi32(dcdx2, dcdx);
> +
> +   transpose4_epi32(&zero, &dcdx, &dcdx2, &dcdx3,
> +                    &span_0, &span_1, &span_2, &unused);
> +
> +   for (i = 0; i < 4; i++) {
> +      __m128i cx = c;
> +
> +      for (j = 0; j < 4; j++) {
> +         __m128i c4rej = vec_add_epi32(cx, rej4);
> +         __m128i rej_masks = vec_srai_epi32(c4rej, 31);
> +
> +         /* if (is_zero(rej_masks)) */
> +         if (vec_movemask_epi8(rej_masks) == 0) {
> +            __m128i c0_0 = vec_add_epi32(vec_perm(cx, cx, vshuf_mask0), span_0);
> +            __m128i c1_0 = vec_add_epi32(vec_perm(cx, cx, vshuf_mask1), span_1);
> +            __m128i c2_0 = vec_add_epi32(vec_perm(cx, cx, vshuf_mask2), span_2);
> +
> +            __m128i c_0 = vec_or(vec_or(c0_0, c1_0), c2_0);
> +
> +            __m128i c0_1 = vec_add_epi32(c0_0, vec_perm(dcdy, dcdy, vshuf_mask0));
> +            __m128i c1_1 = vec_add_epi32(c1_0, vec_perm(dcdy, dcdy, vshuf_mask1));
> +            __m128i c2_1 = vec_add_epi32(c2_0, vec_perm(dcdy, dcdy, vshuf_mask2));
> +
> +            __m128i c_1 = vec_or(vec_or(c0_1, c1_1), c2_1);
> +            __m128i c_01 = vec_packs_epi32(c_0, c_1);
> +
> +            __m128i c0_2 = vec_add_epi32(c0_1, vec_perm(dcdy, dcdy, vshuf_mask0));
> +            __m128i c1_2 = vec_add_epi32(c1_1, vec_perm(dcdy, dcdy, vshuf_mask1));
> +            __m128i c2_2 = vec_add_epi32(c2_1, vec_perm(dcdy, dcdy, vshuf_mask2));
> +
> +            __m128i c_2 = vec_or(vec_or(c0_2, c1_2), c2_2);
> +
> +            __m128i c0_3 = vec_add_epi32(c0_2, vec_perm(dcdy, dcdy, vshuf_mask0));
> +            __m128i c1_3 = vec_add_epi32(c1_2, vec_perm(dcdy, dcdy, vshuf_mask1));
> +            __m128i c2_3 = vec_add_epi32(c2_2, vec_perm(dcdy, dcdy, vshuf_mask2));
> +
> +            __m128i c_3 = vec_or(vec_or(c0_3, c1_3), c2_3);
> +            __m128i c_23 = vec_packs_epi32(c_2, c_3);
> +            __m128i c_0123 = vec_packs_epi16(c_01, c_23);
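> +            /* The signed-saturating packs reduce each 32-bit value to a
> +             * byte while preserving its sign bit, so c_0123 carries one
> +             * sign byte per pixel of the 4x4 block.
> +             */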
> +
> +            unsigned mask = vec_movemask_epi8(c_0123);
> +
> +            out[nr].i = i;
> +            out[nr].j = j;
> +            out[nr].mask = mask;
> +            if (mask != 0xffff)
> +               nr++;
> +         }
> +         cx = vec_add_epi32(cx, vec_slli_epi32(dcdx, 2));
> +      }
> +
> +      c = vec_add_epi32(c, vec_slli_epi32(dcdy, 2));
> +   }
> +
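> +   /* A set mask bit marks a pixel outside the triangle, so the mask is
> +    * inverted to form the coverage mask handed to the shaders.
> +    */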
> +   for (i = 0; i < nr; i++)
> +      lp_rast_shade_quads_mask(task,
> +                               &tri->inputs,
> +                               x + 4 * out[i].j,
> +                               y + 4 * out[i].i,
> +                               0xffff & ~out[i].mask);
> +}
> +
> +#undef NR_PLANES
> +
> +#else
>  
>  void
>  lp_rast_triangle_32_3_16(struct lp_rasterizer_task *task,
> @@ -524,6 +662,8 @@ lp_rast_triangle_32_3_16(struct lp_rasterizer_task *task,
>     lp_rast_triangle_32_3(task, arg2);
>  }
>  
> +#endif /* _ARCH_PWR8 && PIPE_ARCH_LITTLE_ENDIAN */
> +
>  void
>  lp_rast_triangle_32_4_16(struct lp_rasterizer_task *task,
>                           const union lp_rast_cmd_arg arg)
> 

For the series (though I didn't actually check the assembly :-))...
Reviewed-by: Roland Scheidegger <sroland at vmware.com>

I suppose there are going to be some minor clashes with my 64-bit-avoidance
rasterization patches, but that shouldn't hold you back.

Roland
