[Pixman] [PATCH 1/4] ARM: NEON: Some cleanup of bilinear scanline functions

Maarten Bosmans mkbosmans at gmail.com
Wed Sep 21 01:45:25 PDT 2011


What's with the "dlast" in the comments?  It looks like a search-and-replace
gone wrong ("one" -> "last", turning "done" into "dlast" and "one pixel"
into "last pixel").

2011/9/21 Taekyun Kim <podain77 at gmail.com>:
> From: Taekyun Kim <tkq.kim at samsung.com>
>
> Use STRIDE, and do the initial horizontal weight update before
> entering the interpolation loop.
> ---
>  pixman/pixman-arm-neon-asm-bilinear.S |  124 +++++++++++++++++----------------
>  1 files changed, 64 insertions(+), 60 deletions(-)
>
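
For anyone following along, the central idea of the patch: instead of keeping
separate TOP and BOTTOM row pointers (and burning TMP2/TMP4 on the bottom
row), compute the byte distance between the two rows once and reach the
bottom row from the top pointer.  STRIDE even reuses BOTTOM's register, so no
extra register is consumed.  A minimal C sketch of the addressing, with my
own names:

    #include <stdint.h>

    /* Fetch the two vertically adjacent a8r8g8b8 source pixels for one
     * output pixel.  'top' is the upper scanline; 'stride' is the byte
     * distance to the lower one (what the patch computes once as
     * STRIDE = BOTTOM - TOP).  'x' is a 16.16 fixed-point coordinate.
     */
    static inline void
    fetch_pair_8888 (const uint8_t *top, intptr_t stride, int32_t x,
                     uint32_t *tl, uint32_t *bl)
    {
        intptr_t off = (intptr_t) (x >> 16) * 4;        /* asr #16, asl #2 */
        *tl = *(const uint32_t *) (top + off);          /* row above */
        *bl = *(const uint32_t *) (top + off + stride); /* row below */
    }
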
> diff --git a/pixman/pixman-arm-neon-asm-bilinear.S b/pixman/pixman-arm-neon-asm-bilinear.S
> index 3c7fe0f..6a4698f 100644
> --- a/pixman/pixman-arm-neon-asm-bilinear.S
> +++ b/pixman/pixman-arm-neon-asm-bilinear.S
> @@ -50,7 +50,7 @@
>  *
>  * Remarks
>  *  There can be lots of pipeline stalls inside code block and between code blocks.
> - *  Further optimizations will be done by new macro templates using head/tail_head/tail scheme.
> + *  Further optimizations will be dlast by new macro templates using head/tail_head/tail scheme.
>  */
>
>  /* Prevent the stack from becoming executable for no reason... */
> @@ -92,21 +92,19 @@ fname:
>  */
>
>  .macro bilinear_load_8888 reg1, reg2, tmp
> -    mov       TMP2, X, asr #16
> +    mov       TMP1, X, asr #16
>     add       X, X, UX
> -    add       TMP1, TOP, TMP2, asl #2
> -    add       TMP2, BOTTOM, TMP2, asl #2
> -    vld1.32   {reg1}, [TMP1]
> -    vld1.32   {reg2}, [TMP2]
> +    add       TMP1, TOP, TMP1, asl #2
> +    vld1.32   {reg1}, [TMP1], STRIDE
> +    vld1.32   {reg2}, [TMP1]
>  .endm
>
>  .macro bilinear_load_0565 reg1, reg2, tmp
> -    mov       TMP2, X, asr #16
> +    mov       TMP1, X, asr #16
>     add       X, X, UX
> -    add       TMP1, TOP, TMP2, asl #1
> -    add       TMP2, BOTTOM, TMP2, asl #1
> -    vld1.32   {reg2[0]}, [TMP1]
> -    vld1.32   {reg2[1]}, [TMP2]
> +    add       TMP1, TOP, TMP1, asl #1
> +    vld1.32   {reg2[0]}, [TMP1], STRIDE
> +    vld1.32   {reg2[1]}, [TMP1]
>     convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
>  .endm
>
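
Nice touch in these two macros: the post-indexed form
"vld1.32 {reg1}, [TMP1], STRIDE" loads from the top row and then advances
TMP1 by STRIDE bytes, so the second load reads the bottom row through the
same register and TMP2 is no longer needed at all.  Roughly, in C (my names,
just to spell out the post-increment semantics):

    #include <stdint.h>

    /* Read one 32-bit value, then advance the pointer by a byte stride:
     * the C equivalent of a post-indexed vld1.
     */
    static inline uint32_t
    load_post_indexed (const uint8_t **p, intptr_t stride)
    {
        uint32_t v = *(const uint32_t *) *p;
        *p += stride;
        return v;
    }
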
> @@ -134,18 +132,16 @@ fname:
>  .macro bilinear_load_and_vertical_interpolate_two_0565 \
>                 acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
>
> -    mov       TMP2, X, asr #16
> +    mov       TMP1, X, asr #16
>     add       X, X, UX
> -    mov       TMP4, X, asr #16
> +    add       TMP1, TOP, TMP1, asl #1
> +    mov       TMP2, X, asr #16
>     add       X, X, UX
> -    add       TMP1, TOP, TMP2, asl #1
> -    add       TMP2, BOTTOM, TMP2, asl #1
> -    add       TMP3, TOP, TMP4, asl #1
> -    add       TMP4, BOTTOM, TMP4, asl #1
> -    vld1.32   {acc2lo[0]}, [TMP1]
> -    vld1.32   {acc2hi[0]}, [TMP3]
> -    vld1.32   {acc2lo[1]}, [TMP2]
> -    vld1.32   {acc2hi[1]}, [TMP4]
> +    add       TMP2, TOP, TMP2, asl #1
> +    vld1.32   {acc2lo[0]}, [TMP1], STRIDE
> +    vld1.32   {acc2hi[0]}, [TMP2], STRIDE
> +    vld1.32   {acc2lo[1]}, [TMP1]
> +    vld1.32   {acc2hi[1]}, [TMP2]
>     convert_0565_to_x888 acc2, reg3, reg2, reg1
>     vzip.u8   reg1, reg3
>     vzip.u8   reg2, reg4
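
The vertical blend that follows (vmull.u8 against d28, vmlal.u8 against d29)
is untouched by the patch; per channel it is just a weighted sum of the two
rows.  A scalar C version, assuming the usual setup where the two vertical
weights sum to 256:

    #include <stdint.h>

    /* Vertical pass for one 8-bit channel: widen to 16 bits while
     * accumulating top*wt + bottom*wb (the vmull.u8/vmlal.u8 pair).
     */
    static inline uint16_t
    vert_interp (uint8_t top, uint8_t bottom, unsigned wt, unsigned wb)
    {
        return (uint16_t) (top * wt + bottom * wb);   /* wt + wb == 256 */
    }
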
> @@ -161,34 +157,30 @@ fname:
>                 xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
>                 yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
>
> -    mov       TMP2, X, asr #16
> +    mov       TMP1, X, asr #16
>     add       X, X, UX
> -    mov       TMP4, X, asr #16
> +    add       TMP1, TOP, TMP1, asl #1
> +    mov       TMP2, X, asr #16
>     add       X, X, UX
> -    add       TMP1, TOP, TMP2, asl #1
> -    add       TMP2, BOTTOM, TMP2, asl #1
> -    add       TMP3, TOP, TMP4, asl #1
> -    add       TMP4, BOTTOM, TMP4, asl #1
> -    vld1.32   {xacc2lo[0]}, [TMP1]
> -    vld1.32   {xacc2hi[0]}, [TMP3]
> -    vld1.32   {xacc2lo[1]}, [TMP2]
> -    vld1.32   {xacc2hi[1]}, [TMP4]
> +    add       TMP2, TOP, TMP2, asl #1
> +    vld1.32   {xacc2lo[0]}, [TMP1], STRIDE
> +    vld1.32   {xacc2hi[0]}, [TMP2], STRIDE
> +    vld1.32   {xacc2lo[1]}, [TMP1]
> +    vld1.32   {xacc2hi[1]}, [TMP2]
>     convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
> -    mov       TMP2, X, asr #16
> +    mov       TMP1, X, asr #16
>     add       X, X, UX
> -    mov       TMP4, X, asr #16
> +    add       TMP1, TOP, TMP1, asl #1
> +    mov       TMP2, X, asr #16
>     add       X, X, UX
> -    add       TMP1, TOP, TMP2, asl #1
> -    add       TMP2, BOTTOM, TMP2, asl #1
> -    add       TMP3, TOP, TMP4, asl #1
> -    add       TMP4, BOTTOM, TMP4, asl #1
> -    vld1.32   {yacc2lo[0]}, [TMP1]
> +    add       TMP2, TOP, TMP2, asl #1
> +    vld1.32   {yacc2lo[0]}, [TMP1], STRIDE
>     vzip.u8   xreg1, xreg3
> -    vld1.32   {yacc2hi[0]}, [TMP3]
> +    vld1.32   {yacc2hi[0]}, [TMP2], STRIDE
>     vzip.u8   xreg2, xreg4
> -    vld1.32   {yacc2lo[1]}, [TMP2]
> +    vld1.32   {yacc2lo[1]}, [TMP1]
>     vzip.u8   xreg3, xreg4
> -    vld1.32   {yacc2hi[1]}, [TMP4]
> +    vld1.32   {yacc2hi[1]}, [TMP2]
>     vzip.u8   xreg1, xreg2
>     convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
>     vmull.u8  xacc1, xreg1, d28
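
For reference, what convert_0565_to_x888 amounts to per pixel, if I read the
helper right: each r5g6b5 channel is widened to 8 bits with its high bits
replicated into the low ones (so 0x1f maps to 0xff).  Scalar C version:

    #include <stdint.h>

    /* Unpack one r5g6b5 pixel into 8-bit channels with bit replication. */
    static inline void
    unpack_0565 (uint16_t p, uint8_t *r, uint8_t *g, uint8_t *b)
    {
        *r = (uint8_t) ((((p >> 11) & 0x1f) << 3) | ((p >> 13) & 0x07));
        *g = (uint8_t) ((((p >>  5) & 0x3f) << 2) | ((p >>  9) & 0x03));
        *b = (uint8_t) ((((p >>  0) & 0x1f) << 3) | ((p >>  2) & 0x07));
    }
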
> @@ -237,7 +229,7 @@ fname:
>
>  /*
>  * Macros for loading mask pixels into register 'mask'.
> - * vdup must be done in somewhere else.
> + * vdup must be dlast in somewhere else.
>  */
>  .macro bilinear_load_mask_x numpix, mask
>  .endm
> @@ -261,7 +253,7 @@ fname:
>
>  /*
>  * Macros for loading destination pixels into register 'dst0' and 'dst1'.
> - * Interleave should be done somewhere else.
> + * Interleave should be dlast somewhere else.
>  */
>  .macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01
>  .endm
> @@ -303,7 +295,7 @@ fname:
>  * For two pixel case
>  *  (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
>  *  (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
> - * We can do some optimizations for this including one pixel cases.
> + * We can do some optimizations for this including last pixel cases.
>  */
>  .macro bilinear_duplicate_mask_x numpix, mask
>  .endm
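
The layout described in that comment lines every mask lane up with its
channel so that a single vmull.u8 performs all the channel-by-mask
multiplies at once.  Per channel, that multiply is the standard x*m/255 with
rounding (MUL_UN8 in the C paths); scalar version for reference:

    #include <stdint.h>

    /* x * m / 255, rounded: the un8 multiply used when applying a mask. */
    static inline uint8_t
    mul_un8 (uint8_t x, uint8_t m)
    {
        unsigned t = (unsigned) x * m + 0x80;
        return (uint8_t) ((t + (t >> 8)) >> 8);
    }
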
> @@ -326,7 +318,7 @@ fname:
>
>  /*
>  * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form.
> - * Interleave should be done when maks is enabled or operator is 'over'.
> + * Interleave should be dlast when maks is enabled or operator is 'over'.
>  */
>  .macro bilinear_interleave src0, src1, dst0, dst1
>     vuzp.8      src0, src1
> @@ -497,8 +489,7 @@ fname:
>     bilinear_load_dst dst_fmt, op, 1, d18, d19, q9
>     vmull.u8  q1, d0, d28
>     vmlal.u8  q1, d1, d29
> -    vshr.u16  d30, d24, #8
> -    /* 4 cycles bubble */
> +    /* 5 cycles bubble */
>     vshll.u16 q0, d2, #8
>     vmlsl.u16 q0, d2, d30
>     vmlal.u16 q0, d3, d30
> @@ -525,18 +516,18 @@ fname:
>                 q1, q11, d0, d1, d20, d21, d22, d23
>     bilinear_load_mask mask_fmt, 2, d4
>     bilinear_load_dst dst_fmt, op, 2, d18, d19, q9
> -    vshr.u16  q15, q12, #8
> -    vadd.u16  q12, q12, q13
>     vshll.u16 q0, d2, #8
>     vmlsl.u16 q0, d2, d30
>     vmlal.u16 q0, d3, d30
>     vshll.u16 q10, d22, #8
>     vmlsl.u16 q10, d22, d31
>     vmlal.u16 q10, d23, d31
> -    vshrn.u32 d30, q0, #16
> -    vshrn.u32 d31, q10, #16
> +    vshrn.u32 d0, q0, #16
> +    vshrn.u32 d1, q10, #16
>     bilinear_duplicate_mask mask_fmt, 2, d4
> -    vmovn.u16 d0, q15
> +    vshr.u16  q15, q12, #8
> +    vadd.u16  q12, q12, q13
> +    vmovn.u16 d0, q0
>     bilinear_interleave_src_dst \
>                 mask_fmt, op, 2, d0, d1, q0, d18, d19, q9
>     bilinear_apply_mask_to_src \
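
The vshll/vmlsl/vmlal triple here is the horizontal pass: vshll.u16 #8 forms
left*256, vmlsl.u16 subtracts left*wx and vmlal.u16 adds right*wx, giving
left*(256 - wx) + right*wx in a 32-bit accumulator, which vshrn.u32 #16 then
narrows.  Worked scalar version for one channel:

    #include <stdint.h>

    /* Horizontal pass on the 16-bit results of the vertical pass.
     * wx is the per-pixel horizontal weight in [0, 255].
     */
    static inline uint16_t
    horiz_interp (uint16_t left, uint16_t right, unsigned wx)
    {
        uint32_t acc = (uint32_t) left << 8;  /* vshll.u16 q0, d2, #8  */
        acc -= (uint32_t) left  * wx;         /* vmlsl.u16 q0, d2, d30 */
        acc += (uint32_t) right * wx;         /* vmlal.u16 q0, d3, d30 */
        return (uint16_t) (acc >> 16);        /* vshrn.u32 d0, q0, #16 */
    }
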
> @@ -554,8 +545,7 @@ fname:
>                 q1, q11, d0, d1, d20, d21, d22, d23 \
>                 q3, q9,  d4, d5, d16, d17, d18, d19
>     pld       [TMP1, PF_OFFS]
> -    vshr.u16  q15, q12, #8
> -    vadd.u16  q12, q12, q13
> +    sub       TMP1, TMP1, STRIDE
>     vshll.u16 q0, d2, #8
>     vmlsl.u16 q0, d2, d30
>     vmlal.u16 q0, d3, d30
> @@ -567,7 +557,7 @@ fname:
>     vmlsl.u16 q2, d6, d30
>     vmlal.u16 q2, d7, d30
>     vshll.u16 q8, d18, #8
> -    bilinear_load_mask mask_fmt, 4, d30
> +    bilinear_load_mask mask_fmt, 4, d22
>     bilinear_load_dst dst_fmt, op, 4, d2, d3, q1
>     pld       [TMP2, PF_OFFS]
>     vmlsl.u16 q8, d18, d31
> @@ -577,17 +567,19 @@ fname:
>     vshrn.u32 d1, q10, #16
>     vshrn.u32 d4, q2, #16
>     vshrn.u32 d5, q8, #16
> -    bilinear_duplicate_mask mask_fmt, 4, d30
> +    bilinear_duplicate_mask mask_fmt, 4, d22
> +    vshr.u16  q15, q12, #8
>     vmovn.u16 d0, q0
>     vmovn.u16 d1, q2
> +    vadd.u16  q12, q12, q13
>     bilinear_interleave_src_dst \
>                 mask_fmt, op, 4, d0, d1, q0, d2, d3, q1
>     bilinear_apply_mask_to_src \
> -                mask_fmt, 4, d0, d1, q0, d30, \
> +                mask_fmt, 4, d0, d1, q0, d22, \
>                 q3, q8, q9, q10
>     bilinear_combine \
>                 op, 4, d0, d1, q0, d2, d3, q1, \
> -                q3, q8, q9, q10, d22
> +                q3, q8, q9, q10, d23
>     bilinear_deinterleave_dst mask_fmt, op, 4, d0, d1, q0
>     bilinear_store_&dst_fmt 4, q2, q3
>  .endm
> @@ -610,6 +602,7 @@ pixman_asm_function fname
>     PF_OFFS   .req      r7
>     TMP3      .req      r8
>     TMP4      .req      r9
> +    STRIDE    .req      r2
>
>     mov       ip, sp
>     push      {r4, r5, r6, r7, r8, r9}
> @@ -617,6 +610,9 @@ pixman_asm_function fname
>     ldmia     ip, {WB, X, UX, WIDTH}
>     mul       PF_OFFS, PF_OFFS, UX
>
> +    sub       STRIDE, BOTTOM, TOP
> +    .unreq    BOTTOM
> +
>     cmp       WIDTH, #0
>     ble       3f
>
> @@ -626,6 +622,8 @@ pixman_asm_function fname
>     vdup.u8   d29, WB
>     vadd.u16  d25, d25, d26
>     vadd.u16  q13, q13, q13
> +    vshr.u16  q15, q12, #8
> +    vadd.u16  q12, q12, q13
>
>     subs      WIDTH, WIDTH, #4
>     blt       1f
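
And here is the other half of the commit message: the vshr/vadd pair added
before the loop is the prologue of a rotated (software-pipelined) weight
update, so each iteration consumes weights computed during the previous one
instead of stalling on them.  It is also why the mask moved from d30 to d22
above: q15 now stays live across the whole loop.  Scalarized schematic in C,
with process_four() as a hypothetical stand-in for the interpolation body:

    #include <stdint.h>

    static void process_four (uint16_t wx) { (void) wx; }  /* stand-in */

    static void
    scanline (int width, uint16_t x_accum, uint16_t ux_times_4)
    {
        uint16_t wx = x_accum >> 8;    /* vshr.u16 q15, q12, #8 (prologue) */
        x_accum += ux_times_4;         /* vadd.u16 q12, q12, q13           */
        while (width >= 4)
        {
            process_four (wx);         /* uses last iteration's weights    */
            wx = x_accum >> 8;         /* overlap the next update with it  */
            x_accum += ux_times_4;
            width -= 4;
        }
    }
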
> @@ -648,7 +646,6 @@ pixman_asm_function fname
>
>     .unreq    OUT
>     .unreq    TOP
> -    .unreq    BOTTOM
>     .unreq    WT
>     .unreq    WB
>     .unreq    X
> @@ -659,6 +656,7 @@ pixman_asm_function fname
>     .unreq    PF_OFFS
>     .unreq    TMP3
>     .unreq    TMP4
> +    .unreq    STRIDE
>  .endfunc
>
>  .endm
> @@ -682,6 +680,7 @@ pixman_asm_function fname
>     PF_OFFS   .req      r8
>     TMP3      .req      r9
>     TMP4      .req      r10
> +    STRIDE    .req      r3
>
>     mov       ip, sp
>     push      {r4, r5, r6, r7, r8, r9, r10, ip}
> @@ -689,6 +688,9 @@ pixman_asm_function fname
>     ldmia     ip, {WT, WB, X, UX, WIDTH}
>     mul       PF_OFFS, PF_OFFS, UX
>
> +    sub       STRIDE, BOTTOM, TOP
> +    .unreq    BOTTOM
> +
>     cmp       WIDTH, #0
>     ble       3f
>
> @@ -698,6 +700,8 @@ pixman_asm_function fname
>     vdup.u8   d29, WB
>     vadd.u16  d25, d25, d26
>     vadd.u16  q13, q13, q13
> +    vshr.u16  q15, q12, #8
> +    vadd.u16  q12, q12, q13
>
>     subs      WIDTH, WIDTH, #4
>     blt       1f
> @@ -720,7 +724,6 @@ pixman_asm_function fname
>
>     .unreq    OUT
>     .unreq    TOP
> -    .unreq    BOTTOM
>     .unreq    WT
>     .unreq    WB
>     .unreq    X
> @@ -732,6 +735,7 @@ pixman_asm_function fname
>     .unreq    PF_OFFS
>     .unreq    TMP3
>     .unreq    TMP4
> +    .unreq    STRIDE
>  .endfunc
>
>  .endm
> --
> 1.7.1