Diffstat (limited to 'libvpx/vp8/encoder/arm/neon')
-rw-r--r--  libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm     | 258
-rw-r--r--  libvpx/vp8/encoder/arm/neon/picklpf_arm.c              |  46
-rw-r--r--  libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm         | 221
-rw-r--r--  libvpx/vp8/encoder/arm/neon/subtract_neon.asm          | 199
-rw-r--r--  libvpx/vp8/encoder/arm/neon/vp8_memcpy_neon.asm        |  70
-rw-r--r--  libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm      | 116
-rw-r--r--  libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm | 103
7 files changed, 1013 insertions(+), 0 deletions(-)
diff --git a/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm b/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
new file mode 100644
index 0000000..1430588
--- /dev/null
+++ b/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
@@ -0,0 +1,258 @@
+;
+; Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_fast_quantize_b_neon|
+    EXPORT  |vp8_fast_quantize_b_pair_neon|
+
+    INCLUDE asm_enc_offsets.asm
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=4
+
+;vp8_fast_quantize_b_pair_neon(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2);
+|vp8_fast_quantize_b_pair_neon| PROC
+
+    stmfd           sp!, {r4-r9}
+    vstmdb          sp!, {q4-q7}
+
+    ldr             r4, [r0, #vp8_block_coeff]
+    ldr             r5, [r0, #vp8_block_quant_fast]
+    ldr             r6, [r0, #vp8_block_round]
+
+    vld1.16         {q0, q1}, [r4@128]  ; load z
+
+    ldr             r7, [r2, #vp8_blockd_qcoeff]
+
+    vabs.s16        q4, q0              ; calculate x = abs(z)
+    vabs.s16        q5, q1
+
+    ;right shift 15 to get sign, all 0 if it is positive, all 1 if it is negative
+    vshr.s16        q2, q0, #15         ; sz
+    vshr.s16        q3, q1, #15
+
+    vld1.s16        {q6, q7}, [r6@128]  ; load round_ptr [0-15]
+    vld1.s16        {q8, q9}, [r5@128]  ; load quant_ptr [0-15]
+
+    ldr             r4, [r1, #vp8_block_coeff]
+
+    vadd.s16        q4, q6              ; x + Round
+    vadd.s16        q5, q7
+
+    vld1.16         {q0, q1}, [r4@128]  ; load z2
+
+    vqdmulh.s16     q4, q8              ; y = ((Round+abs(z)) * Quant) >> 16
+    vqdmulh.s16     q5, q9
+
+    vabs.s16        q10, q0             ; calculate x2 = abs(z_2)
+    vabs.s16        q11, q1
+    vshr.s16        q12, q0, #15        ; sz2
+    vshr.s16        q13, q1, #15
+
+    ;modify data to have its original sign
+    veor.s16        q4, q2              ; y^sz
+    veor.s16        q5, q3
+
+    vadd.s16        q10, q6             ; x2 + Round
+    vadd.s16        q11, q7
+
+    ldr             r8, [r2, #vp8_blockd_dequant]
+
+    vqdmulh.s16     q10, q8             ; y2 = ((Round+abs(z)) * Quant) >> 16
+    vqdmulh.s16     q11, q9
+
+    vshr.s16        q4, #1              ; right shift 1 after vqdmulh
+    vshr.s16        q5, #1
+
+    vld1.s16        {q6, q7}, [r8@128]  ;load dequant_ptr[i]
+
+    vsub.s16        q4, q2              ; x1=(y^sz)-sz = (y^sz)-(-1) (2's complement)
+    vsub.s16        q5, q3
+
+    vshr.s16        q10, #1             ; right shift 1 after vqdmulh
+    vshr.s16        q11, #1
+
+    ldr             r9, [r2, #vp8_blockd_dqcoeff]
+
+    veor.s16        q10, q12            ; y2^sz2
+    veor.s16        q11, q13
+
+    vst1.s16        {q4, q5}, [r7]      ; store: qcoeff = x1
+
+
+    vsub.s16        q10, q12            ; x2=(y2^sz2)-sz2 = (y2^sz2)-(-1) (2's complement)
+    vsub.s16        q11, q13
+
+    ldr             r6, [r3, #vp8_blockd_qcoeff]
+
+    vmul.s16        q2, q6, q4          ; x * Dequant
+    vmul.s16        q3, q7, q5
+
+    adr             r0, inv_zig_zag     ; load ptr of inverse zigzag table
+
+    vceq.s16        q8, q8              ; set q8 to all 1
+
+    vst1.s16        {q10, q11}, [r6]    ; store: qcoeff = x2
+
+    vmul.s16        q12, q6, q10        ; x2 * Dequant
+    vmul.s16        q13, q7, q11
+
+    vld1.16         {q6, q7}, [r0@128]  ; load inverse scan order
+
+    vtst.16         q14, q4, q8         ; now find eob
+    vtst.16         q15, q5, q8         ; non-zero element is set to all 1
+
+    vst1.s16        {q2, q3}, [r9]      ; store dqcoeff = x * Dequant
+
+    ldr             r7, [r3, #vp8_blockd_dqcoeff]
+
+    vand            q0, q6, q14         ; get all valid numbers from scan array
+    vand            q1, q7, q15
+
+    vst1.s16        {q12, q13}, [r7]    ; store dqcoeff = x2 * Dequant
+
+    vtst.16         q2, q10, q8         ; now find eob
+    vtst.16         q3, q11, q8         ; non-zero element is set to all 1
+
+    vmax.u16        q0, q0, q1          ; find maximum value in q0, q1
+
+    vand            q10, q6, q2         ; get all valid numbers from scan array
+    vand            q11, q7, q3
+    vmax.u16        q10, q10, q11       ; find maximum value in q10, q11
+
+    vmax.u16        d0, d0, d1
+    vmax.u16        d20, d20, d21
+    vmovl.u16       q0, d0
+    vmovl.u16       q10, d20
+
+    vmax.u32        d0, d0, d1
+    vmax.u32        d20, d20, d21
+    vpmax.u32       d0, d0, d0
+    vpmax.u32       d20, d20, d20
+
+    ldr             r4, [r2, #vp8_blockd_eob]
+    ldr             r5, [r3, #vp8_blockd_eob]
+
+    vst1.8          {d0[0]}, [r4]       ; store eob
+    vst1.8          {d20[0]}, [r5]      ; store eob
+
+    vldmia          sp!, {q4-q7}
+    ldmfd           sp!, {r4-r9}
+    bx              lr
+
+    ENDP
+
+;void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
+|vp8_fast_quantize_b_neon| PROC
+
+    stmfd           sp!, {r4-r7}
+
+    ldr             r3, [r0, #vp8_block_coeff]
+    ldr             r4, [r0, #vp8_block_quant_fast]
+    ldr             r5, [r0, #vp8_block_round]
+
+    vld1.16         {q0, q1}, [r3@128]  ; load z
+    vorr.s16        q14, q0, q1         ; check if all zero (step 1)
+    ldr             r6, [r1, #vp8_blockd_qcoeff]
+    ldr             r7, [r1, #vp8_blockd_dqcoeff]
+    vorr.s16        d28, d28, d29       ; check if all zero (step 2)
+
+    vabs.s16        q12, q0             ; calculate x = abs(z)
+    vabs.s16        q13, q1
+
+    ;right shift 15 to get sign, all 0 if it is positive, all 1 if it is negative
+    vshr.s16        q2, q0, #15         ; sz
+    vmov            r2, r3, d28         ; check if all zero (step 3)
+    vshr.s16        q3, q1, #15
+
+    vld1.s16        {q14, q15}, [r5@128]; load round_ptr [0-15]
+    vld1.s16        {q8, q9}, [r4@128]  ; load quant_ptr [0-15]
+
+    vadd.s16        q12, q14            ; x + Round
+    vadd.s16        q13, q15
+
+    adr             r0, inv_zig_zag     ; load ptr of inverse zigzag table
+
+    vqdmulh.s16     q12, q8             ; y = ((Round+abs(z)) * Quant) >> 16
+    vqdmulh.s16     q13, q9
+
+    vld1.16         {q10, q11}, [r0@128]; load inverse scan order
+
+    vceq.s16        q8, q8              ; set q8 to all 1
+
+    ldr             r4, [r1, #vp8_blockd_dequant]
+
+    vshr.s16        q12, #1             ; right shift 1 after vqdmulh
+    vshr.s16        q13, #1
+
+    ldr             r5, [r1, #vp8_blockd_eob]
+
+    orr             r2, r2, r3          ; check if all zero (step 4)
+    cmp             r2, #0              ; check if all zero (step 5)
+    beq             zero_output         ; check if all zero (step 6)
+
+    ;modify data to have its original sign
+    veor.s16        q12, q2             ; y^sz
+    veor.s16        q13, q3
+
+    vsub.s16        q12, q2             ; x1=(y^sz)-sz = (y^sz)-(-1) (2's complement)
+    vsub.s16        q13, q3
+
+    vld1.s16        {q2, q3}, [r4@128]  ; load dequant_ptr[i]
+
+    vtst.16         q14, q12, q8        ; now find eob
+    vtst.16         q15, q13, q8        ; non-zero element is set to all 1
+
+    vst1.s16        {q12, q13}, [r6@128]; store: qcoeff = x1
+
+    vand            q10, q10, q14       ; get all valid numbers from scan array
+    vand            q11, q11, q15
+
+
+    vmax.u16        q0, q10, q11        ; find maximum value in q0, q1
+    vmax.u16        d0, d0, d1
+    vmovl.u16       q0, d0
+
+    vmul.s16        q2, q12             ; x * Dequant
+    vmul.s16        q3, q13
+
+    vmax.u32        d0, d0, d1
+    vpmax.u32       d0, d0, d0
+
+    vst1.s16        {q2, q3}, [r7@128]  ; store dqcoeff = x * Dequant
+
+    vst1.8          {d0[0]}, [r5]       ; store eob
+
+    ldmfd           sp!, {r4-r7}
+    bx              lr
+
+zero_output
+    strb            r2, [r5]            ; store eob
+    vst1.s16        {q0, q1}, [r6@128]  ; qcoeff = 0
+    vst1.s16        {q0, q1}, [r7@128]  ; dqcoeff = 0
+
+    ldmfd           sp!, {r4-r7}
+    bx              lr
+
+    ENDP
+
+; default inverse zigzag table is defined in vp8/common/entropy.c
+    ALIGN 16    ; enable use of @128 bit aligned loads
+inv_zig_zag
+    DCW 0x0001, 0x0002, 0x0006, 0x0007
+    DCW 0x0003, 0x0005, 0x0008, 0x000d
+    DCW 0x0004, 0x0009, 0x000c, 0x000e
+    DCW 0x000a, 0x000b, 0x000f, 0x0010
+
+    END
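As an editorial aid, here is a scalar C sketch of what the quantizer above computes per coefficient. A vqdmulh.s16 followed by vshr #1 is equivalent to a plain (x * quant) >> 16, and the veor/vsub pair restores the sign from the mask sz = z >> 15. Function and parameter names are illustrative, not the libvpx API:

    /* Scalar sketch of the NEON fast quantizer; illustrative names. */
    #include <stdlib.h>

    static const short inv_zig_zag_c[16] = {
         1,  2,  6,  7,
         3,  5,  8, 13,
         4,  9, 12, 14,
        10, 11, 15, 16
    };

    void fast_quantize_b_sketch(const short *z, const short *round,
                                const short *quant, const short *dequant,
                                short *qcoeff, short *dqcoeff, char *eob)
    {
        int i, last = 0;
        for (i = 0; i < 16; i++) {
            int sz = z[i] < 0 ? -1 : 0;        /* vshr.s16 #15: sign mask */
            int x  = abs(z[i]) + round[i];     /* x + Round */
            int y  = (x * quant[i]) >> 16;     /* vqdmulh then vshr #1 */
            y = (y ^ sz) - sz;                 /* veor/vsub: restore sign */
            qcoeff[i]  = (short)y;
            dqcoeff[i] = (short)(y * dequant[i]);
            if (y && inv_zig_zag_c[i] > last)  /* vtst/vand/vmax chain */
                last = inv_zig_zag_c[i];
        }
        *eob = (char)last;                     /* vst1.8 {d0[0]} */
    }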
diff --git a/libvpx/vp8/encoder/arm/neon/picklpf_arm.c b/libvpx/vp8/encoder/arm/neon/picklpf_arm.c
new file mode 100644
index 0000000..ec8071e
--- /dev/null
+++ b/libvpx/vp8/encoder/arm/neon/picklpf_arm.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/loopfilter.h"
+#include "vpx_scale/yv12config.h"
+
+extern void vp8_memcpy_partial_neon(unsigned char *dst_ptr,
+                                    unsigned char *src_ptr,
+                                    int sz);
+
+
+void vp8_yv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc,
+                                      YV12_BUFFER_CONFIG *dst_ybc)
+{
+    unsigned char *src_y, *dst_y;
+    int yheight;
+    int ystride;
+    int yoffset;
+    int linestocopy;
+
+    yheight = src_ybc->y_height;
+    ystride = src_ybc->y_stride;
+
+    /* number of MB rows to use in partial filtering */
+    linestocopy = (yheight >> 4) / PARTIAL_FRAME_FRACTION;
+    linestocopy = linestocopy ? linestocopy << 4 : 16;  /* 16 lines per MB */
+
+    /* Copy extra 4 so that full filter context is available if filtering done
+     * on the copied partial frame and not original. Partial filter does mb
+     * filtering for top row also, which can modify 3 pixels above.
+     */
+    linestocopy += 4;
+    /* partial image starts at ~middle of frame (macroblock border) */
+    yoffset = ystride * (((yheight >> 5) * 16) - 4);
+    src_y = src_ybc->y_buffer + yoffset;
+    dst_y = dst_ybc->y_buffer + yoffset;
+
+    vp8_memcpy_partial_neon(dst_y, src_y, ystride * linestocopy);
+}
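A worked example of the partial-copy arithmetic above, assuming a 1080-line frame and PARTIAL_FRAME_FRACTION == 8 (the macro's real value lives in the loop-filter headers; 8 is an assumption here):

    /* Hypothetical numbers; PARTIAL_FRAME_FRACTION assumed to be 8. */
    #include <stdio.h>

    int main(void)
    {
        int yheight = 1080, ystride = 1984;  /* stride includes borders */
        int linestocopy = (yheight >> 4) / 8;               /* 67/8 = 8 MB rows */
        linestocopy = linestocopy ? linestocopy << 4 : 16;  /* 128 lines */
        linestocopy += 4;                                   /* + context: 132 */
        int yoffset = ystride * (((yheight >> 5) * 16) - 4);/* starts at row 524 */
        printf("%d lines from byte offset %d\n", linestocopy, yoffset);
        return 0;
    }

So roughly an eighth of the frame, starting just above the vertical midpoint, is copied and filtered when picking the loop-filter level.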
diff --git a/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm b/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
new file mode 100644
index 0000000..09dd011
--- /dev/null
+++ b/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
@@ -0,0 +1,221 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_short_fdct4x4_neon|
+    EXPORT  |vp8_short_fdct8x4_neon|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=4
+
+
+    ALIGN 16    ; enable use of @128 bit aligned loads
+coeff
+    DCW      5352,  5352,  5352,  5352
+    DCW      2217,  2217,  2217,  2217
+    DCD     14500, 14500, 14500, 14500
+    DCD      7500,  7500,  7500,  7500
+    DCD     12000, 12000, 12000, 12000
+    DCD     51000, 51000, 51000, 51000
+
+;void vp8_short_fdct4x4_c(short *input, short *output, int pitch)
+|vp8_short_fdct4x4_neon| PROC
+
+    ; Part one
+    vld1.16         {d0}, [r0@64], r2
+    adr             r12, coeff
+    vld1.16         {d1}, [r0@64], r2
+    vld1.16         {q8}, [r12@128]!    ; d16=5352,  d17=2217
+    vld1.16         {d2}, [r0@64], r2
+    vld1.32         {q9, q10}, [r12@128]!   ; q9=14500, q10=7500
+    vld1.16         {d3}, [r0@64], r2
+
+    ; transpose d0=ip[0], d1=ip[1], d2=ip[2], d3=ip[3]
+    vtrn.32         d0, d2
+    vtrn.32         d1, d3
+    vld1.32         {q11,q12}, [r12@128]    ; q11=12000, q12=51000
+    vtrn.16         d0, d1
+    vtrn.16         d2, d3
+
+    vadd.s16        d4, d0, d3      ; a1 = ip[0] + ip[3]
+    vadd.s16        d5, d1, d2      ; b1 = ip[1] + ip[2]
+    vsub.s16        d6, d1, d2      ; c1 = ip[1] - ip[2]
+    vsub.s16        d7, d0, d3      ; d1 = ip[0] - ip[3]
+
+    vshl.s16        q2, q2, #3      ; (a1, b1) << 3
+    vshl.s16        q3, q3, #3      ; (c1, d1) << 3
+
+    vadd.s16        d0, d4, d5      ; op[0] = a1 + b1
+    vsub.s16        d2, d4, d5      ; op[2] = a1 - b1
+
+    vmlal.s16       q9, d7, d16     ; d1*5352 + 14500
+    vmlal.s16       q10, d7, d17    ; d1*2217 + 7500
+    vmlal.s16       q9, d6, d17     ; c1*2217 + d1*5352 + 14500
+    vmlsl.s16       q10, d6, d16    ; d1*2217 - c1*5352 + 7500
+
+    vshrn.s32       d1, q9, #12     ; op[1] = (c1*2217 + d1*5352 + 14500)>>12
+    vshrn.s32       d3, q10, #12    ; op[3] = (d1*2217 - c1*5352 + 7500)>>12
+
+
+    ; Part two
+
+    ; transpose d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
+    vtrn.32         d0, d2
+    vtrn.32         d1, d3
+    vtrn.16         d0, d1
+    vtrn.16         d2, d3
+
+    vmov.s16        d26, #7
+
+    vadd.s16        d4, d0, d3      ; a1 = ip[0] + ip[12]
+    vadd.s16        d5, d1, d2      ; b1 = ip[4] + ip[8]
+    vsub.s16        d6, d1, d2      ; c1 = ip[4] - ip[8]
+    vadd.s16        d4, d4, d26     ; a1 + 7
+    vsub.s16        d7, d0, d3      ; d1 = ip[0] - ip[12]
+
+    vadd.s16        d0, d4, d5      ; op[0] = a1 + b1 + 7
+    vsub.s16        d2, d4, d5      ; op[8] = a1 - b1 + 7
+
+    vmlal.s16       q11, d7, d16    ; d1*5352 + 12000
+    vmlal.s16       q12, d7, d17    ; d1*2217 + 51000
+
+    vceq.s16        d4, d7, #0
+
+    vshr.s16        d0, d0, #4
+    vshr.s16        d2, d2, #4
+
+    vmlal.s16       q11, d6, d17    ; c1*2217 + d1*5352 + 12000
+    vmlsl.s16       q12, d6, d16    ; d1*2217 - c1*5352 + 51000
+
+    vmvn.s16        d4, d4
+    vshrn.s32       d1, q11, #16    ; op[4] = (c1*2217 + d1*5352 + 12000)>>16
+    vsub.s16        d1, d1, d4      ; op[4] += (d1!=0)
+    vshrn.s32       d3, q12, #16    ; op[12]= (d1*2217 - c1*5352 + 51000)>>16
+
+    vst1.16         {q0, q1}, [r1@128]
+
+    bx              lr
+
+    ENDP
+
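A scalar sketch of the first 1-D pass that the PROC above performs on each row, with the same constants (5352/2217), biases (14500/7500), and shift; the function name is illustrative, not the libvpx API:

    /* One row of the first fdct4x4 pass; illustrative only. */
    void fdct4_row_pass_sketch(const short *ip, short *op)
    {
        int a1 = (ip[0] + ip[3]) << 3;  /* vadd + vshl #3 */
        int b1 = (ip[1] + ip[2]) << 3;
        int c1 = (ip[1] - ip[2]) << 3;
        int d1 = (ip[0] - ip[3]) << 3;

        op[0] = (short)(a1 + b1);
        op[2] = (short)(a1 - b1);
        op[1] = (short)((c1 * 2217 + d1 * 5352 + 14500) >> 12); /* vmlal/vshrn */
        op[3] = (short)((d1 * 2217 - c1 * 5352 +  7500) >> 12);
    }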
+;void vp8_short_fdct8x4_c(short *input, short *output, int pitch)
+|vp8_short_fdct8x4_neon| PROC
+
+    ; Part one
+
+    vld1.16         {q0}, [r0@128], r2
+    adr             r12, coeff
+    vld1.16         {q1}, [r0@128], r2
+    vld1.16         {q8}, [r12@128]!    ; d16=5352,  d17=2217
+    vld1.16         {q2}, [r0@128], r2
+    vld1.32         {q9, q10}, [r12@128]!   ; q9=14500, q10=7500
+    vld1.16         {q3}, [r0@128], r2
+
+    ; transpose q0=ip[0], q1=ip[1], q2=ip[2], q3=ip[3]
+    vtrn.32         q0, q2          ; [A0|B0]
+    vtrn.32         q1, q3          ; [A1|B1]
+    vtrn.16         q0, q1          ; [A2|B2]
+    vtrn.16         q2, q3          ; [A3|B3]
+
+    vadd.s16        q11, q0, q3     ; a1 = ip[0] + ip[3]
+    vadd.s16        q12, q1, q2     ; b1 = ip[1] + ip[2]
+    vsub.s16        q13, q1, q2     ; c1 = ip[1] - ip[2]
+    vsub.s16        q14, q0, q3     ; d1 = ip[0] - ip[3]
+
+    vshl.s16        q11, q11, #3    ; a1 << 3
+    vshl.s16        q12, q12, #3    ; b1 << 3
+    vshl.s16        q13, q13, #3    ; c1 << 3
+    vshl.s16        q14, q14, #3    ; d1 << 3
+
+    vadd.s16        q0, q11, q12    ; [A0 | B0] = a1 + b1
+    vsub.s16        q2, q11, q12    ; [A2 | B2] = a1 - b1
+
+    vmov.s16        q11, q9         ; 14500
+    vmov.s16        q12, q10        ; 7500
+
+    vmlal.s16       q9, d28, d16    ; A[1] = d1*5352 + 14500
+    vmlal.s16       q10, d28, d17   ; A[3] = d1*2217 + 7500
+    vmlal.s16       q11, d29, d16   ; B[1] = d1*5352 + 14500
+    vmlal.s16       q12, d29, d17   ; B[3] = d1*2217 + 7500
+
+    vmlal.s16       q9, d26, d17    ; A[1] = c1*2217 + d1*5352 + 14500
+    vmlsl.s16       q10, d26, d16   ; A[3] = d1*2217 - c1*5352 + 7500
+    vmlal.s16       q11, d27, d17   ; B[1] = c1*2217 + d1*5352 + 14500
+    vmlsl.s16       q12, d27, d16   ; B[3] = d1*2217 - c1*5352 + 7500
+
+    vshrn.s32       d2, q9, #12     ; A[1] = (c1*2217 + d1*5352 + 14500)>>12
+    vshrn.s32       d6, q10, #12    ; A[3] = (d1*2217 - c1*5352 + 7500)>>12
+    vshrn.s32       d3, q11, #12    ; B[1] = (c1*2217 + d1*5352 + 14500)>>12
+    vshrn.s32       d7, q12, #12    ; B[3] = (d1*2217 - c1*5352 + 7500)>>12
+
+
+    ; Part two
+    vld1.32         {q9,q10}, [r12@128] ; q9=12000, q10=51000
+
+    ; transpose q0=ip[0], q1=ip[4], q2=ip[8], q3=ip[12]
+    vtrn.32         q0, q2          ; q0=[A0 | B0]
+    vtrn.32         q1, q3          ; q1=[A4 | B4]
+    vtrn.16         q0, q1          ; q2=[A8 | B8]
+    vtrn.16         q2, q3          ; q3=[A12|B12]
+
+    vmov.s16        q15, #7
+
+    vadd.s16        q11, q0, q3     ; a1 = ip[0] + ip[12]
+    vadd.s16        q12, q1, q2     ; b1 = ip[4] + ip[8]
+    vadd.s16        q11, q11, q15   ; a1 + 7
+    vsub.s16        q13, q1, q2     ; c1 = ip[4] - ip[8]
+    vsub.s16        q14, q0, q3     ; d1 = ip[0] - ip[12]
+
+    vadd.s16        q0, q11, q12    ; a1 + b1 + 7
+    vsub.s16        q1, q11, q12    ; a1 - b1 + 7
+
+    vmov.s16        q11, q9         ; 12000
+    vmov.s16        q12, q10        ; 51000
+
+    vshr.s16        d0, d0, #4      ; A[0] = (a1 + b1 + 7)>>4
+    vshr.s16        d4, d1, #4      ; B[0] = (a1 + b1 + 7)>>4
+    vshr.s16        d2, d2, #4      ; A[8] = (a1 - b1 + 7)>>4
+    vshr.s16        d6, d3, #4      ; B[8] = (a1 - b1 + 7)>>4
+
+
+    vmlal.s16       q9, d28, d16    ; A[4]  = d1*5352 + 12000
+    vmlal.s16       q10, d28, d17   ; A[12] = d1*2217 + 51000
+    vmlal.s16       q11, d29, d16   ; B[4]  = d1*5352 + 12000
+    vmlal.s16       q12, d29, d17   ; B[12] = d1*2217 + 51000
+
+    vceq.s16        q14, q14, #0
+
+    vmlal.s16       q9, d26, d17    ; A[4]  = c1*2217 + d1*5352 + 12000
+    vmlsl.s16       q10, d26, d16   ; A[12] = d1*2217 - c1*5352 + 51000
+    vmlal.s16       q11, d27, d17   ; B[4]  = c1*2217 + d1*5352 + 12000
+    vmlsl.s16       q12, d27, d16   ; B[12] = d1*2217 - c1*5352 + 51000
+
+    vmvn.s16        q14, q14
+
+    vshrn.s32       d1, q9, #16     ; A[4] = (c1*2217 + d1*5352 + 12000)>>16
+    vshrn.s32       d3, q10, #16    ; A[12]= (d1*2217 - c1*5352 + 51000)>>16
+    vsub.s16        d1, d1, d28     ; A[4] += (d1!=0)
+
+    vshrn.s32       d5, q11, #16    ; B[4] = (c1*2217 + d1*5352 + 12000)>>16
+    vshrn.s32       d7, q12, #16    ; B[12]= (d1*2217 - c1*5352 + 51000)>>16
+    vsub.s16        d5, d5, d29     ; B[4] += (d1!=0)
+
+    vst1.16         {q0, q1}, [r1@128]! ; block A
+    vst1.16         {q2, q3}, [r1@128]! ; block B
+
+    bx              lr
+
+    ENDP
+
+    END
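The second pass differs only in its rounding: a +7 bias with >>4 for the even outputs, larger biases (12000/51000) with >>16 for the odd ones, plus the (d1 != 0) correction implemented above with vceq/vmvn/vsub. A scalar sketch of one column (illustrative names; ip/op index a 4x4 block in row-major order):

    /* One column of the second fdct pass; illustrative only. */
    void fdct4_col_pass_sketch(const short *ip, short *op)
    {
        int a1 = ip[0] + ip[12];
        int b1 = ip[4] + ip[8];
        int c1 = ip[4] - ip[8];
        int d1 = ip[0] - ip[12];

        op[0]  = (short)((a1 + b1 + 7) >> 4);
        op[8]  = (short)((a1 - b1 + 7) >> 4);
        op[4]  = (short)(((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0));
        op[12] = (short)((d1 * 2217 - c1 * 5352 + 51000) >> 16);
    }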
diff --git a/libvpx/vp8/encoder/arm/neon/subtract_neon.asm b/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
new file mode 100644
index 0000000..91a328c
--- /dev/null
+++ b/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
@@ -0,0 +1,199 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+    EXPORT  |vp8_subtract_b_neon|
+    EXPORT  |vp8_subtract_mby_neon|
+    EXPORT  |vp8_subtract_mbuv_neon|
+
+    INCLUDE asm_enc_offsets.asm
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp8_subtract_b_neon(BLOCK *be, BLOCKD *bd, int pitch)
+|vp8_subtract_b_neon| PROC
+
+    stmfd       sp!, {r4-r7}
+
+    ldr         r3, [r0, #vp8_block_base_src]
+    ldr         r4, [r0, #vp8_block_src]
+    ldr         r5, [r0, #vp8_block_src_diff]
+    ldr         r3, [r3]
+    ldr         r6, [r0, #vp8_block_src_stride]
+    add         r3, r3, r4      ; src = *base_src + src
+    ldr         r7, [r1, #vp8_blockd_predictor]
+
+    vld1.8      {d0}, [r3], r6  ;load src
+    vld1.8      {d1}, [r7], r2  ;load pred
+    vld1.8      {d2}, [r3], r6
+    vld1.8      {d3}, [r7], r2
+    vld1.8      {d4}, [r3], r6
+    vld1.8      {d5}, [r7], r2
+    vld1.8      {d6}, [r3], r6
+    vld1.8      {d7}, [r7], r2
+
+    vsubl.u8    q10, d0, d1
+    vsubl.u8    q11, d2, d3
+    vsubl.u8    q12, d4, d5
+    vsubl.u8    q13, d6, d7
+
+    mov         r2, r2, lsl #1
+
+    vst1.16     {d20}, [r5], r2 ;store diff
+    vst1.16     {d22}, [r5], r2
+    vst1.16     {d24}, [r5], r2
+    vst1.16     {d26}, [r5], r2
+
+    ldmfd       sp!, {r4-r7}
+    bx          lr
+
+    ENDP
+
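A scalar equivalent of the 4x4 subtraction above (vsubl.u8 widens the unsigned byte difference to a signed 16-bit residual); names are illustrative:

    /* residual = source - prediction, one 4x4 block; illustrative names. */
    void subtract_b_sketch(short *diff, int diff_pitch,
                           const unsigned char *src, int src_stride,
                           const unsigned char *pred, int pred_pitch)
    {
        int r, c;
        for (r = 0; r < 4; r++) {
            for (c = 0; c < 4; c++)
                diff[c] = (short)(src[c] - pred[c]);  /* vsubl.u8 */
            diff += diff_pitch;
            src  += src_stride;
            pred += pred_pitch;
        }
    }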
+;==========================================
+;void vp8_subtract_mby_neon(short *diff, unsigned char *src, int src_stride,
+;                           unsigned char *pred, int pred_stride)
+|vp8_subtract_mby_neon| PROC
+    push        {r4-r7}
+    mov         r12, #4
+    ldr         r4, [sp, #16]   ; pred_stride
+    mov         r6, #32         ; "diff" stride x2
+    add         r5, r0, #16     ; second diff pointer
+
+subtract_mby_loop
+    vld1.8      {q0}, [r1], r2  ;load src
+    vld1.8      {q1}, [r3], r4  ;load pred
+    vld1.8      {q2}, [r1], r2
+    vld1.8      {q3}, [r3], r4
+    vld1.8      {q4}, [r1], r2
+    vld1.8      {q5}, [r3], r4
+    vld1.8      {q6}, [r1], r2
+    vld1.8      {q7}, [r3], r4
+
+    vsubl.u8    q8, d0, d2
+    vsubl.u8    q9, d1, d3
+    vsubl.u8    q10, d4, d6
+    vsubl.u8    q11, d5, d7
+    vsubl.u8    q12, d8, d10
+    vsubl.u8    q13, d9, d11
+    vsubl.u8    q14, d12, d14
+    vsubl.u8    q15, d13, d15
+
+    vst1.16     {q8}, [r0], r6  ;store diff
+    vst1.16     {q9}, [r5], r6
+    vst1.16     {q10}, [r0], r6
+    vst1.16     {q11}, [r5], r6
+    vst1.16     {q12}, [r0], r6
+    vst1.16     {q13}, [r5], r6
+    vst1.16     {q14}, [r0], r6
+    vst1.16     {q15}, [r5], r6
+
+    subs        r12, r12, #1
+    bne         subtract_mby_loop
+
+    pop         {r4-r7}
+    bx          lr
+    ENDP
+
+;=================================
+;void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc,
+;                         int src_stride, unsigned char *upred,
+;                         unsigned char *vpred, int pred_stride)
+
+|vp8_subtract_mbuv_neon| PROC
+    push        {r4-r7}
+    ldr         r4, [sp, #16]   ; upred
+    ldr         r5, [sp, #20]   ; vpred
+    ldr         r6, [sp, #24]   ; pred_stride
+    add         r0, r0, #512    ; short *udiff = diff + 256;
+    mov         r12, #32        ; "diff" stride x2
+    add         r7, r0, #16     ; second diff pointer
+
+;u
+    vld1.8      {d0}, [r1], r3  ;load usrc
+    vld1.8      {d1}, [r4], r6  ;load upred
+    vld1.8      {d2}, [r1], r3
+    vld1.8      {d3}, [r4], r6
+    vld1.8      {d4}, [r1], r3
+    vld1.8      {d5}, [r4], r6
+    vld1.8      {d6}, [r1], r3
+    vld1.8      {d7}, [r4], r6
+    vld1.8      {d8}, [r1], r3
+    vld1.8      {d9}, [r4], r6
+    vld1.8      {d10}, [r1], r3
+    vld1.8      {d11}, [r4], r6
+    vld1.8      {d12}, [r1], r3
+    vld1.8      {d13}, [r4], r6
+    vld1.8      {d14}, [r1], r3
+    vld1.8      {d15}, [r4], r6
+
+    vsubl.u8    q8, d0, d1
+    vsubl.u8    q9, d2, d3
+    vsubl.u8    q10, d4, d5
+    vsubl.u8    q11, d6, d7
+    vsubl.u8    q12, d8, d9
+    vsubl.u8    q13, d10, d11
+    vsubl.u8    q14, d12, d13
+    vsubl.u8    q15, d14, d15
+
+    vst1.16     {q8}, [r0], r12 ;store diff
+    vst1.16     {q9}, [r7], r12
+    vst1.16     {q10}, [r0], r12
+    vst1.16     {q11}, [r7], r12
+    vst1.16     {q12}, [r0], r12
+    vst1.16     {q13}, [r7], r12
+    vst1.16     {q14}, [r0], r12
+    vst1.16     {q15}, [r7], r12
+
+;v
+    vld1.8      {d0}, [r2], r3  ;load vsrc
+    vld1.8      {d1}, [r5], r6  ;load vpred
+    vld1.8      {d2}, [r2], r3
+    vld1.8      {d3}, [r5], r6
+    vld1.8      {d4}, [r2], r3
+    vld1.8      {d5}, [r5], r6
+    vld1.8      {d6}, [r2], r3
+    vld1.8      {d7}, [r5], r6
+    vld1.8      {d8}, [r2], r3
+    vld1.8      {d9}, [r5], r6
+    vld1.8      {d10}, [r2], r3
+    vld1.8      {d11}, [r5], r6
+    vld1.8      {d12}, [r2], r3
+    vld1.8      {d13}, [r5], r6
+    vld1.8      {d14}, [r2], r3
+    vld1.8      {d15}, [r5], r6
+
+    vsubl.u8    q8, d0, d1
+    vsubl.u8    q9, d2, d3
+    vsubl.u8    q10, d4, d5
+    vsubl.u8    q11, d6, d7
+    vsubl.u8    q12, d8, d9
+    vsubl.u8    q13, d10, d11
+    vsubl.u8    q14, d12, d13
+    vsubl.u8    q15, d14, d15
+
+    vst1.16     {q8}, [r0], r12 ;store diff
+    vst1.16     {q9}, [r7], r12
+    vst1.16     {q10}, [r0], r12
+    vst1.16     {q11}, [r7], r12
+    vst1.16     {q12}, [r0], r12
+    vst1.16     {q13}, [r7], r12
+    vst1.16     {q14}, [r0], r12
+    vst1.16     {q15}, [r7], r12
+
+    pop         {r4-r7}
+    bx          lr
+
+    ENDP
+
+    END
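The chroma routine assumes the macroblock diff layout: 256 luma values first, then 64 U and 64 V values, so "add r0, r0, #512" is a byte offset that lands at diff + 256 shorts. A scalar sketch of the U half (the V half is identical with vsrc/vpred and, by the store order above, continues at diff + 320); names are illustrative:

    /* U-plane residual for one macroblock; illustrative names. */
    void subtract_u_plane_sketch(short *diff, const unsigned char *usrc,
                                 int src_stride, const unsigned char *upred,
                                 int pred_stride)
    {
        short *udiff = diff + 256;   /* the asm's #512 is in bytes */
        int r, c;
        for (r = 0; r < 8; r++) {
            for (c = 0; c < 8; c++)
                udiff[c] = (short)(usrc[c] - upred[c]);
            udiff += 8;
            usrc  += src_stride;
            upred += pred_stride;
        }
    }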
diff --git a/libvpx/vp8/encoder/arm/neon/vp8_memcpy_neon.asm b/libvpx/vp8/encoder/arm/neon/vp8_memcpy_neon.asm
new file mode 100644
index 0000000..5b9f11e
--- /dev/null
+++ b/libvpx/vp8/encoder/arm/neon/vp8_memcpy_neon.asm
@@ -0,0 +1,70 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_memcpy_partial_neon|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+;=========================================
+;this is not a full memcpy function!!!
+;void vp8_memcpy_partial_neon(unsigned char *dst_ptr, unsigned char *src_ptr,
+;                             int sz);
+|vp8_memcpy_partial_neon| PROC
+    ;pld            [r1]        ;preload pred data
+    ;pld            [r1, #128]
+    ;pld            [r1, #256]
+    ;pld            [r1, #384]
+
+    mov         r12, r2, lsr #8 ;copy 256 bytes data at one time
+
+memcpy_neon_loop
+    vld1.8      {q0, q1}, [r1]! ;load src data
+    subs        r12, r12, #1
+    vld1.8      {q2, q3}, [r1]!
+    vst1.8      {q0, q1}, [r0]! ;copy to dst_ptr
+    vld1.8      {q4, q5}, [r1]!
+    vst1.8      {q2, q3}, [r0]!
+    vld1.8      {q6, q7}, [r1]!
+    vst1.8      {q4, q5}, [r0]!
+    vld1.8      {q8, q9}, [r1]!
+    vst1.8      {q6, q7}, [r0]!
+    vld1.8      {q10, q11}, [r1]!
+    vst1.8      {q8, q9}, [r0]!
+    vld1.8      {q12, q13}, [r1]!
+    vst1.8      {q10, q11}, [r0]!
+    vld1.8      {q14, q15}, [r1]!
+    vst1.8      {q12, q13}, [r0]!
+    vst1.8      {q14, q15}, [r0]!
+
+    ;pld            [r1]        ;preload pred data -- need to adjust for real device
+    ;pld            [r1, #128]
+    ;pld            [r1, #256]
+    ;pld            [r1, #384]
+
+    bne         memcpy_neon_loop
+
+    ands        r3, r2, #0xff   ;extra copy
+    beq         done_copy_neon_loop
+
+extra_copy_neon_loop
+    vld1.8      {q0}, [r1]!     ;load src data
+    subs        r3, r3, #16
+    vst1.8      {q0}, [r0]!
+    bne         extra_copy_neon_loop
+
+done_copy_neon_loop
+    bx          lr
+    ENDP
+
+    END
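In C terms, the routine copies 256-byte chunks (eight 32-byte load/store pairs per pass) and finishes the remainder in 16-byte chunks. Like the assembly, this sketch assumes sz is a multiple of 16, which is why it is "not a full memcpy"; names are illustrative:

    /* Copy strategy of vp8_memcpy_partial_neon; assumes sz % 16 == 0. */
    #include <string.h>

    void memcpy_partial_sketch(unsigned char *dst, const unsigned char *src,
                               int sz)
    {
        int n256 = sz >> 8;              /* mov r12, r2, lsr #8 */
        int rem  = sz & 0xff;            /* ands r3, r2, #0xff  */
        for (; n256 > 0; n256--) {
            memcpy(dst, src, 256);       /* memcpy_neon_loop */
            dst += 256; src += 256;
        }
        for (; rem > 0; rem -= 16) {
            memcpy(dst, src, 16);        /* extra_copy_neon_loop */
            dst += 16; src += 16;
        }
    }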
diff --git a/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm b/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm
new file mode 100644
index 0000000..55edbf5
--- /dev/null
+++ b/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm
@@ -0,0 +1,116 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_mse16x16_neon|
+    EXPORT  |vp8_get4x4sse_cs_neon|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+;============================
+; r0    unsigned char *src_ptr
+; r1    int source_stride
+; r2    unsigned char *ref_ptr
+; r3    int recon_stride
+; stack unsigned int *sse
+;note: in this function, sum is never used, so the sum part of the
+;vp8_variance() calculation is omitted.
+
+|vp8_mse16x16_neon| PROC
+    vmov.i8     q7, #0          ;q7, q8, q9, q10 - sse
+    vmov.i8     q8, #0
+    vmov.i8     q9, #0
+    vmov.i8     q10, #0
+
+    mov         r12, #8
+
+mse16x16_neon_loop
+    vld1.8      {q0}, [r0], r1  ;Load up source and reference
+    vld1.8      {q2}, [r2], r3
+    vld1.8      {q1}, [r0], r1
+    vld1.8      {q3}, [r2], r3
+
+    vsubl.u8    q11, d0, d4
+    vsubl.u8    q12, d1, d5
+    vsubl.u8    q13, d2, d6
+    vsubl.u8    q14, d3, d7
+
+    vmlal.s16   q7, d22, d22
+    vmlal.s16   q8, d23, d23
+
+    subs        r12, r12, #1
+
+    vmlal.s16   q9, d24, d24
+    vmlal.s16   q10, d25, d25
+    vmlal.s16   q7, d26, d26
+    vmlal.s16   q8, d27, d27
+    vmlal.s16   q9, d28, d28
+    vmlal.s16   q10, d29, d29
+
+    bne         mse16x16_neon_loop
+
+    vadd.u32    q7, q7, q8
+    vadd.u32    q9, q9, q10
+
+    ldr         r12, [sp]       ;load *sse from stack
+
+    vadd.u32    q10, q7, q9
+    vpaddl.u32  q1, q10
+    vadd.u64    d0, d2, d3
+
+    vst1.32     {d0[0]}, [r12]
+    vmov.32     r0, d0[0]
+
+    bx          lr
+
+    ENDP
+
+
+;=============================
+; r0    unsigned char *src_ptr,
+; r1    int source_stride,
+; r2    unsigned char *ref_ptr,
+; r3    int recon_stride
+|vp8_get4x4sse_cs_neon| PROC
+    vld1.8      {d0}, [r0], r1  ;Load up source and reference
+    vld1.8      {d4}, [r2], r3
+    vld1.8      {d1}, [r0], r1
+    vld1.8      {d5}, [r2], r3
+    vld1.8      {d2}, [r0], r1
+    vld1.8      {d6}, [r2], r3
+    vld1.8      {d3}, [r0], r1
+    vld1.8      {d7}, [r2], r3
+
+    vsubl.u8    q11, d0, d4
+    vsubl.u8    q12, d1, d5
+    vsubl.u8    q13, d2, d6
+    vsubl.u8    q14, d3, d7
+
+    vmull.s16   q7, d22, d22
+    vmull.s16   q8, d24, d24
+    vmull.s16   q9, d26, d26
+    vmull.s16   q10, d28, d28
+
+    vadd.u32    q7, q7, q8
+    vadd.u32    q9, q9, q10
+    vadd.u32    q9, q7, q9
+
+    vpaddl.u32  q1, q9
+    vadd.u64    d0, d2, d3
+
+    vmov.32     r0, d0[0]
+    bx          lr
+
+    ENDP
+
+    END
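A scalar restatement of vp8_mse16x16_neon: a plain sum of squared differences over the 16x16 block, with the variance "sum" term skipped as the note above says; names are illustrative:

    /* 16x16 SSE; the asm returns the same value in r0 and through *sse. */
    unsigned int mse16x16_sketch(const unsigned char *src, int src_stride,
                                 const unsigned char *ref, int ref_stride,
                                 unsigned int *sse)
    {
        unsigned int total = 0;
        int r, c;
        for (r = 0; r < 16; r++) {
            for (c = 0; c < 16; c++) {
                int d = src[c] - ref[c];         /* vsubl.u8  */
                total += (unsigned int)(d * d);  /* vmlal.s16 */
            }
            src += src_stride;
            ref += ref_stride;
        }
        *sse = total;
        return total;
    }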
diff --git a/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm b/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm
new file mode 100644
index 0000000..2226629
--- /dev/null
+++ b/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm
@@ -0,0 +1,103 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_short_walsh4x4_neon|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+;void vp8_short_walsh4x4_neon(short *input, short *output, int pitch)
+; r0    short *input,
+; r1    short *output,
+; r2    int pitch
+|vp8_short_walsh4x4_neon| PROC
+
+    vld1.16     {d0}, [r0@64], r2   ; load input
+    vld1.16     {d1}, [r0@64], r2
+    vld1.16     {d2}, [r0@64], r2
+    vld1.16     {d3}, [r0@64]
+
+    ;First for-loop
+    ;transpose d0, d1, d2, d3. Then, d0=ip[0], d1=ip[1], d2=ip[2], d3=ip[3]
+    vtrn.32     d0, d2
+    vtrn.32     d1, d3
+
+    vmov.s32    q15, #3             ; add 3 to all values
+
+    vtrn.16     d0, d1
+    vtrn.16     d2, d3
+
+    vadd.s16    d4, d0, d2          ; ip[0] + ip[2]
+    vadd.s16    d5, d1, d3          ; ip[1] + ip[3]
+    vsub.s16    d6, d1, d3          ; ip[1] - ip[3]
+    vsub.s16    d7, d0, d2          ; ip[0] - ip[2]
+
+    vshl.s16    d4, d4, #2          ; a1 = (ip[0] + ip[2]) << 2
+    vshl.s16    d5, d5, #2          ; d1 = (ip[1] + ip[3]) << 2
+    vshl.s16    d6, d6, #2          ; c1 = (ip[1] - ip[3]) << 2
+    vceq.s16    d16, d4, #0         ; a1 == 0
+    vshl.s16    d7, d7, #2          ; b1 = (ip[0] - ip[2]) << 2
+
+    vadd.s16    d0, d4, d5          ; a1 + d1
+    vmvn        d16, d16            ; a1 != 0
+    vsub.s16    d3, d4, d5          ; op[3] = a1 - d1
+    vadd.s16    d1, d7, d6          ; op[1] = b1 + c1
+    vsub.s16    d2, d7, d6          ; op[2] = b1 - c1
+    vsub.s16    d0, d0, d16         ; op[0] = a1 + d1 + (a1 != 0)
+
+    ;Second for-loop
+    ;transpose d0, d1, d2, d3. Then, d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
+    vtrn.32     d1, d3
+    vtrn.32     d0, d2
+    vtrn.16     d2, d3
+    vtrn.16     d0, d1
+
+    vaddl.s16   q8, d0, d2          ; a1 = ip[0]+ip[8]
+    vaddl.s16   q9, d1, d3          ; d1 = ip[4]+ip[12]
+    vsubl.s16   q10, d1, d3         ; c1 = ip[4]-ip[12]
+    vsubl.s16   q11, d0, d2         ; b1 = ip[0]-ip[8]
+
+    vadd.s32    q0, q8, q9          ; a2 = a1 + d1
+    vadd.s32    q1, q11, q10        ; b2 = b1 + c1
+    vsub.s32    q2, q11, q10        ; c2 = b1 - c1
+    vsub.s32    q3, q8, q9          ; d2 = a1 - d1
+
+    vclt.s32    q8, q0, #0
+    vclt.s32    q9, q1, #0
+    vclt.s32    q10, q2, #0
+    vclt.s32    q11, q3, #0
+
+    ; subtract -1 (or 0)
+    vsub.s32    q0, q0, q8          ; a2 += a2 < 0
+    vsub.s32    q1, q1, q9          ; b2 += b2 < 0
+    vsub.s32    q2, q2, q10         ; c2 += c2 < 0
+    vsub.s32    q3, q3, q11         ; d2 += d2 < 0
+
+    vadd.s32    q8, q0, q15         ; a2 + 3
+    vadd.s32    q9, q1, q15         ; b2 + 3
+    vadd.s32    q10, q2, q15        ; c2 + 3
+    vadd.s32    q11, q3, q15        ; d2 + 3
+
+    ; vrshrn? would add 1 << (3-1) = 4, not the 3 needed here
+    vshrn.s32   d0, q8, #3
+    vshrn.s32   d1, q9, #3
+    vshrn.s32   d2, q10, #3
+    vshrn.s32   d3, q11, #3
+
+    vst1.16     {q0, q1}, [r1@128]
+
+    bx          lr
+
+    ENDP
+
+    END
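A scalar sketch of the Walsh-Hadamard transform above, following the same flow: a 16-bit row pass with << 2 and the (a1 != 0) correction, then a 32-bit column pass that rounds negative values up (add 1 for negatives, add 3, shift right 3). Names are illustrative; pitch is in bytes, as in the assembly:

    /* 4x4 WHT used on the DC coefficients; illustrative, not the libvpx API. */
    void short_walsh4x4_sketch(const short *input, short *output, int pitch)
    {
        int i, t[16];
        for (i = 0; i < 4; i++) {                /* row pass (16-bit in asm) */
            const short *ip =
                (const short *)((const char *)input + i * pitch);
            int a1 = (ip[0] + ip[2]) << 2;
            int d1 = (ip[1] + ip[3]) << 2;
            int c1 = (ip[1] - ip[3]) << 2;
            int b1 = (ip[0] - ip[2]) << 2;
            t[4 * i + 0] = a1 + d1 + (a1 != 0);  /* vceq/vmvn/vsub */
            t[4 * i + 1] = b1 + c1;
            t[4 * i + 2] = b1 - c1;
            t[4 * i + 3] = a1 - d1;
        }
        for (i = 0; i < 4; i++) {                /* column pass (32-bit) */
            int a1 = t[i + 0] + t[i + 8];
            int d1 = t[i + 4] + t[i + 12];
            int c1 = t[i + 4] - t[i + 12];
            int b1 = t[i + 0] - t[i + 8];
            int a2 = a1 + d1, b2 = b1 + c1, c2 = b1 - c1, d2 = a1 - d1;
            a2 += a2 < 0; b2 += b2 < 0;          /* vclt + vsub */
            c2 += c2 < 0; d2 += d2 < 0;
            output[i + 0]  = (short)((a2 + 3) >> 3);
            output[i + 4]  = (short)((b2 + 3) >> 3);
            output[i + 8]  = (short)((c2 + 3) >> 3);
            output[i + 12] = (short)((d2 + 3) >> 3);
        }
    }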